| repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
linkedin/Zopkio | examples/zookeeper/zookeeper_example.py | 4 | 1235 | # Copyright 2015 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
test = {
"deployment_code": os.path.join(os.path.dirname(os.path.abspath(__file__)), "deployment.py"),
"test_code": [
os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_suites/zookeeper_basic.py")],
"perf_code": os.path.join(os.path.dirname(os.path.abspath(__file__)), "log_naarad.py"),
"configs_directory": os.path.join(os.path.dirname(os.path.abspath(__file__)), "config/")
}
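# Hedged variant (illustration only, not part of the original example): the
# repeated os.path.dirname(os.path.abspath(__file__)) expression can be
# computed once and reused, e.g.
#
#   base_dir = os.path.dirname(os.path.abspath(__file__))
#   test = {
#       "deployment_code": os.path.join(base_dir, "deployment.py"),
#       "test_code": [os.path.join(base_dir, "test_suites/zookeeper_basic.py")],
#       "perf_code": os.path.join(base_dir, "log_naarad.py"),
#       "configs_directory": os.path.join(base_dir, "config/"),
#   }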
| apache-2.0 | -6,807,697,375,829,529,000 | 43.107143 | 98 | 0.732794 | false |
nwokeo/supysonic | venv/lib/python2.7/site-packages/psycopg2/errorcodes.py | 7 | 13246 | """Error codes for PostgreSQL
This module contains symbolic names for all PostgreSQL error codes.
"""
# psycopg2/errorcodes.py - PostgreSQL error codes
#
# Copyright (C) 2006-2010 Johan Dahlin <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# Based on:
#
# http://www.postgresql.org/docs/current/static/errcodes-appendix.html
#
def lookup(code, _cache={}):
"""Lookup an error code or class code and return its symbolic name.
Raise `KeyError` if the code is not found.
"""
if _cache:
return _cache[code]
# Generate the lookup map at first usage.
tmp = {}
for k, v in globals().iteritems():
if isinstance(v, str) and len(v) in (2, 5):
tmp[v] = k
assert tmp
# Atomic update, to avoid race condition on import (bug #382)
_cache.update(tmp)
return _cache[code]
# autogenerated data: do not edit below this point.
# Error classes
CLASS_SUCCESSFUL_COMPLETION = '00'
CLASS_WARNING = '01'
CLASS_NO_DATA = '02'
CLASS_SQL_STATEMENT_NOT_YET_COMPLETE = '03'
CLASS_CONNECTION_EXCEPTION = '08'
CLASS_TRIGGERED_ACTION_EXCEPTION = '09'
CLASS_FEATURE_NOT_SUPPORTED = '0A'
CLASS_INVALID_TRANSACTION_INITIATION = '0B'
CLASS_LOCATOR_EXCEPTION = '0F'
CLASS_INVALID_GRANTOR = '0L'
CLASS_INVALID_ROLE_SPECIFICATION = '0P'
CLASS_DIAGNOSTICS_EXCEPTION = '0Z'
CLASS_CASE_NOT_FOUND = '20'
CLASS_CARDINALITY_VIOLATION = '21'
CLASS_DATA_EXCEPTION = '22'
CLASS_INTEGRITY_CONSTRAINT_VIOLATION = '23'
CLASS_INVALID_CURSOR_STATE = '24'
CLASS_INVALID_TRANSACTION_STATE = '25'
CLASS_INVALID_SQL_STATEMENT_NAME = '26'
CLASS_TRIGGERED_DATA_CHANGE_VIOLATION = '27'
CLASS_INVALID_AUTHORIZATION_SPECIFICATION = '28'
CLASS_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B'
CLASS_INVALID_TRANSACTION_TERMINATION = '2D'
CLASS_SQL_ROUTINE_EXCEPTION = '2F'
CLASS_INVALID_CURSOR_NAME = '34'
CLASS_EXTERNAL_ROUTINE_EXCEPTION = '38'
CLASS_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39'
CLASS_SAVEPOINT_EXCEPTION = '3B'
CLASS_INVALID_CATALOG_NAME = '3D'
CLASS_INVALID_SCHEMA_NAME = '3F'
CLASS_TRANSACTION_ROLLBACK = '40'
CLASS_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42'
CLASS_WITH_CHECK_OPTION_VIOLATION = '44'
CLASS_INSUFFICIENT_RESOURCES = '53'
CLASS_PROGRAM_LIMIT_EXCEEDED = '54'
CLASS_OBJECT_NOT_IN_PREREQUISITE_STATE = '55'
CLASS_OPERATOR_INTERVENTION = '57'
CLASS_SYSTEM_ERROR = '58'
CLASS_CONFIGURATION_FILE_ERROR = 'F0'
CLASS_FOREIGN_DATA_WRAPPER_ERROR = 'HV'
CLASS_PL_PGSQL_ERROR = 'P0'
CLASS_INTERNAL_ERROR = 'XX'
# Class 00 - Successful Completion
SUCCESSFUL_COMPLETION = '00000'
# Class 01 - Warning
WARNING = '01000'
NULL_VALUE_ELIMINATED_IN_SET_FUNCTION = '01003'
STRING_DATA_RIGHT_TRUNCATION = '01004'
PRIVILEGE_NOT_REVOKED = '01006'
PRIVILEGE_NOT_GRANTED = '01007'
IMPLICIT_ZERO_BIT_PADDING = '01008'
DYNAMIC_RESULT_SETS_RETURNED = '0100C'
DEPRECATED_FEATURE = '01P01'
# Class 02 - No Data (this is also a warning class per the SQL standard)
NO_DATA = '02000'
NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED = '02001'
# Class 03 - SQL Statement Not Yet Complete
SQL_STATEMENT_NOT_YET_COMPLETE = '03000'
# Class 08 - Connection Exception
CONNECTION_EXCEPTION = '08000'
SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION = '08001'
CONNECTION_DOES_NOT_EXIST = '08003'
SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION = '08004'
CONNECTION_FAILURE = '08006'
TRANSACTION_RESOLUTION_UNKNOWN = '08007'
PROTOCOL_VIOLATION = '08P01'
# Class 09 - Triggered Action Exception
TRIGGERED_ACTION_EXCEPTION = '09000'
# Class 0A - Feature Not Supported
FEATURE_NOT_SUPPORTED = '0A000'
# Class 0B - Invalid Transaction Initiation
INVALID_TRANSACTION_INITIATION = '0B000'
# Class 0F - Locator Exception
LOCATOR_EXCEPTION = '0F000'
INVALID_LOCATOR_SPECIFICATION = '0F001'
# Class 0L - Invalid Grantor
INVALID_GRANTOR = '0L000'
INVALID_GRANT_OPERATION = '0LP01'
# Class 0P - Invalid Role Specification
INVALID_ROLE_SPECIFICATION = '0P000'
# Class 0Z - Diagnostics Exception
DIAGNOSTICS_EXCEPTION = '0Z000'
STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER = '0Z002'
# Class 20 - Case Not Found
CASE_NOT_FOUND = '20000'
# Class 21 - Cardinality Violation
CARDINALITY_VIOLATION = '21000'
# Class 22 - Data Exception
DATA_EXCEPTION = '22000'
STRING_DATA_RIGHT_TRUNCATION = '22001'
NULL_VALUE_NO_INDICATOR_PARAMETER = '22002'
NUMERIC_VALUE_OUT_OF_RANGE = '22003'
NULL_VALUE_NOT_ALLOWED = '22004'
ERROR_IN_ASSIGNMENT = '22005'
INVALID_DATETIME_FORMAT = '22007'
DATETIME_FIELD_OVERFLOW = '22008'
INVALID_TIME_ZONE_DISPLACEMENT_VALUE = '22009'
ESCAPE_CHARACTER_CONFLICT = '2200B'
INVALID_USE_OF_ESCAPE_CHARACTER = '2200C'
INVALID_ESCAPE_OCTET = '2200D'
ZERO_LENGTH_CHARACTER_STRING = '2200F'
MOST_SPECIFIC_TYPE_MISMATCH = '2200G'
NOT_AN_XML_DOCUMENT = '2200L'
INVALID_XML_DOCUMENT = '2200M'
INVALID_XML_CONTENT = '2200N'
INVALID_XML_COMMENT = '2200S'
INVALID_XML_PROCESSING_INSTRUCTION = '2200T'
INVALID_INDICATOR_PARAMETER_VALUE = '22010'
SUBSTRING_ERROR = '22011'
DIVISION_BY_ZERO = '22012'
INVALID_ARGUMENT_FOR_NTILE_FUNCTION = '22014'
INTERVAL_FIELD_OVERFLOW = '22015'
INVALID_ARGUMENT_FOR_NTH_VALUE_FUNCTION = '22016'
INVALID_CHARACTER_VALUE_FOR_CAST = '22018'
INVALID_ESCAPE_CHARACTER = '22019'
INVALID_REGULAR_EXPRESSION = '2201B'
INVALID_ARGUMENT_FOR_LOGARITHM = '2201E'
INVALID_ARGUMENT_FOR_POWER_FUNCTION = '2201F'
INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION = '2201G'
INVALID_ROW_COUNT_IN_LIMIT_CLAUSE = '2201W'
INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE = '2201X'
INVALID_LIMIT_VALUE = '22020'
CHARACTER_NOT_IN_REPERTOIRE = '22021'
INDICATOR_OVERFLOW = '22022'
INVALID_PARAMETER_VALUE = '22023'
UNTERMINATED_C_STRING = '22024'
INVALID_ESCAPE_SEQUENCE = '22025'
STRING_DATA_LENGTH_MISMATCH = '22026'
TRIM_ERROR = '22027'
ARRAY_SUBSCRIPT_ERROR = '2202E'
INVALID_TABLESAMPLE_REPEAT = '2202G'
INVALID_TABLESAMPLE_ARGUMENT = '2202H'
FLOATING_POINT_EXCEPTION = '22P01'
INVALID_TEXT_REPRESENTATION = '22P02'
INVALID_BINARY_REPRESENTATION = '22P03'
BAD_COPY_FILE_FORMAT = '22P04'
UNTRANSLATABLE_CHARACTER = '22P05'
NONSTANDARD_USE_OF_ESCAPE_CHARACTER = '22P06'
# Class 23 - Integrity Constraint Violation
INTEGRITY_CONSTRAINT_VIOLATION = '23000'
RESTRICT_VIOLATION = '23001'
NOT_NULL_VIOLATION = '23502'
FOREIGN_KEY_VIOLATION = '23503'
UNIQUE_VIOLATION = '23505'
CHECK_VIOLATION = '23514'
EXCLUSION_VIOLATION = '23P01'
# Class 24 - Invalid Cursor State
INVALID_CURSOR_STATE = '24000'
# Class 25 - Invalid Transaction State
INVALID_TRANSACTION_STATE = '25000'
ACTIVE_SQL_TRANSACTION = '25001'
BRANCH_TRANSACTION_ALREADY_ACTIVE = '25002'
INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION = '25003'
INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION = '25004'
NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION = '25005'
READ_ONLY_SQL_TRANSACTION = '25006'
SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED = '25007'
HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL = '25008'
NO_ACTIVE_SQL_TRANSACTION = '25P01'
IN_FAILED_SQL_TRANSACTION = '25P02'
# Class 26 - Invalid SQL Statement Name
INVALID_SQL_STATEMENT_NAME = '26000'
# Class 27 - Triggered Data Change Violation
TRIGGERED_DATA_CHANGE_VIOLATION = '27000'
# Class 28 - Invalid Authorization Specification
INVALID_AUTHORIZATION_SPECIFICATION = '28000'
INVALID_PASSWORD = '28P01'
# Class 2B - Dependent Privilege Descriptors Still Exist
DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST = '2B000'
DEPENDENT_OBJECTS_STILL_EXIST = '2BP01'
# Class 2D - Invalid Transaction Termination
INVALID_TRANSACTION_TERMINATION = '2D000'
# Class 2F - SQL Routine Exception
SQL_ROUTINE_EXCEPTION = '2F000'
MODIFYING_SQL_DATA_NOT_PERMITTED = '2F002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED = '2F003'
READING_SQL_DATA_NOT_PERMITTED = '2F004'
FUNCTION_EXECUTED_NO_RETURN_STATEMENT = '2F005'
# Class 34 - Invalid Cursor Name
INVALID_CURSOR_NAME = '34000'
# Class 38 - External Routine Exception
EXTERNAL_ROUTINE_EXCEPTION = '38000'
CONTAINING_SQL_NOT_PERMITTED = '38001'
MODIFYING_SQL_DATA_NOT_PERMITTED = '38002'
PROHIBITED_SQL_STATEMENT_ATTEMPTED = '38003'
READING_SQL_DATA_NOT_PERMITTED = '38004'
# Class 39 - External Routine Invocation Exception
EXTERNAL_ROUTINE_INVOCATION_EXCEPTION = '39000'
INVALID_SQLSTATE_RETURNED = '39001'
NULL_VALUE_NOT_ALLOWED = '39004'
TRIGGER_PROTOCOL_VIOLATED = '39P01'
SRF_PROTOCOL_VIOLATED = '39P02'
EVENT_TRIGGER_PROTOCOL_VIOLATED = '39P03'
# Class 3B - Savepoint Exception
SAVEPOINT_EXCEPTION = '3B000'
INVALID_SAVEPOINT_SPECIFICATION = '3B001'
# Class 3D - Invalid Catalog Name
INVALID_CATALOG_NAME = '3D000'
# Class 3F - Invalid Schema Name
INVALID_SCHEMA_NAME = '3F000'
# Class 40 - Transaction Rollback
TRANSACTION_ROLLBACK = '40000'
SERIALIZATION_FAILURE = '40001'
TRANSACTION_INTEGRITY_CONSTRAINT_VIOLATION = '40002'
STATEMENT_COMPLETION_UNKNOWN = '40003'
DEADLOCK_DETECTED = '40P01'
# Class 42 - Syntax Error or Access Rule Violation
SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION = '42000'
INSUFFICIENT_PRIVILEGE = '42501'
SYNTAX_ERROR = '42601'
INVALID_NAME = '42602'
INVALID_COLUMN_DEFINITION = '42611'
NAME_TOO_LONG = '42622'
DUPLICATE_COLUMN = '42701'
AMBIGUOUS_COLUMN = '42702'
UNDEFINED_COLUMN = '42703'
UNDEFINED_OBJECT = '42704'
DUPLICATE_OBJECT = '42710'
DUPLICATE_ALIAS = '42712'
DUPLICATE_FUNCTION = '42723'
AMBIGUOUS_FUNCTION = '42725'
GROUPING_ERROR = '42803'
DATATYPE_MISMATCH = '42804'
WRONG_OBJECT_TYPE = '42809'
INVALID_FOREIGN_KEY = '42830'
CANNOT_COERCE = '42846'
UNDEFINED_FUNCTION = '42883'
RESERVED_NAME = '42939'
UNDEFINED_TABLE = '42P01'
UNDEFINED_PARAMETER = '42P02'
DUPLICATE_CURSOR = '42P03'
DUPLICATE_DATABASE = '42P04'
DUPLICATE_PREPARED_STATEMENT = '42P05'
DUPLICATE_SCHEMA = '42P06'
DUPLICATE_TABLE = '42P07'
AMBIGUOUS_PARAMETER = '42P08'
AMBIGUOUS_ALIAS = '42P09'
INVALID_COLUMN_REFERENCE = '42P10'
INVALID_CURSOR_DEFINITION = '42P11'
INVALID_DATABASE_DEFINITION = '42P12'
INVALID_FUNCTION_DEFINITION = '42P13'
INVALID_PREPARED_STATEMENT_DEFINITION = '42P14'
INVALID_SCHEMA_DEFINITION = '42P15'
INVALID_TABLE_DEFINITION = '42P16'
INVALID_OBJECT_DEFINITION = '42P17'
INDETERMINATE_DATATYPE = '42P18'
INVALID_RECURSION = '42P19'
WINDOWING_ERROR = '42P20'
COLLATION_MISMATCH = '42P21'
INDETERMINATE_COLLATION = '42P22'
# Class 44 - WITH CHECK OPTION Violation
WITH_CHECK_OPTION_VIOLATION = '44000'
# Class 53 - Insufficient Resources
INSUFFICIENT_RESOURCES = '53000'
DISK_FULL = '53100'
OUT_OF_MEMORY = '53200'
TOO_MANY_CONNECTIONS = '53300'
CONFIGURATION_LIMIT_EXCEEDED = '53400'
# Class 54 - Program Limit Exceeded
PROGRAM_LIMIT_EXCEEDED = '54000'
STATEMENT_TOO_COMPLEX = '54001'
TOO_MANY_COLUMNS = '54011'
TOO_MANY_ARGUMENTS = '54023'
# Class 55 - Object Not In Prerequisite State
OBJECT_NOT_IN_PREREQUISITE_STATE = '55000'
OBJECT_IN_USE = '55006'
CANT_CHANGE_RUNTIME_PARAM = '55P02'
LOCK_NOT_AVAILABLE = '55P03'
# Class 57 - Operator Intervention
OPERATOR_INTERVENTION = '57000'
QUERY_CANCELED = '57014'
ADMIN_SHUTDOWN = '57P01'
CRASH_SHUTDOWN = '57P02'
CANNOT_CONNECT_NOW = '57P03'
DATABASE_DROPPED = '57P04'
# Class 58 - System Error (errors external to PostgreSQL itself)
SYSTEM_ERROR = '58000'
IO_ERROR = '58030'
UNDEFINED_FILE = '58P01'
DUPLICATE_FILE = '58P02'
# Class F0 - Configuration File Error
CONFIG_FILE_ERROR = 'F0000'
LOCK_FILE_EXISTS = 'F0001'
# Class HV - Foreign Data Wrapper Error (SQL/MED)
FDW_ERROR = 'HV000'
FDW_OUT_OF_MEMORY = 'HV001'
FDW_DYNAMIC_PARAMETER_VALUE_NEEDED = 'HV002'
FDW_INVALID_DATA_TYPE = 'HV004'
FDW_COLUMN_NAME_NOT_FOUND = 'HV005'
FDW_INVALID_DATA_TYPE_DESCRIPTORS = 'HV006'
FDW_INVALID_COLUMN_NAME = 'HV007'
FDW_INVALID_COLUMN_NUMBER = 'HV008'
FDW_INVALID_USE_OF_NULL_POINTER = 'HV009'
FDW_INVALID_STRING_FORMAT = 'HV00A'
FDW_INVALID_HANDLE = 'HV00B'
FDW_INVALID_OPTION_INDEX = 'HV00C'
FDW_INVALID_OPTION_NAME = 'HV00D'
FDW_OPTION_NAME_NOT_FOUND = 'HV00J'
FDW_REPLY_HANDLE = 'HV00K'
FDW_UNABLE_TO_CREATE_EXECUTION = 'HV00L'
FDW_UNABLE_TO_CREATE_REPLY = 'HV00M'
FDW_UNABLE_TO_ESTABLISH_CONNECTION = 'HV00N'
FDW_NO_SCHEMAS = 'HV00P'
FDW_SCHEMA_NOT_FOUND = 'HV00Q'
FDW_TABLE_NOT_FOUND = 'HV00R'
FDW_FUNCTION_SEQUENCE_ERROR = 'HV010'
FDW_TOO_MANY_HANDLES = 'HV014'
FDW_INCONSISTENT_DESCRIPTOR_INFORMATION = 'HV021'
FDW_INVALID_ATTRIBUTE_VALUE = 'HV024'
FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH = 'HV090'
FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER = 'HV091'
# Class P0 - PL/pgSQL Error
PLPGSQL_ERROR = 'P0000'
RAISE_EXCEPTION = 'P0001'
NO_DATA_FOUND = 'P0002'
TOO_MANY_ROWS = 'P0003'
ASSERT_FAILURE = 'P0004'
# Class XX - Internal Error
INTERNAL_ERROR = 'XX000'
DATA_CORRUPTED = 'XX001'
INDEX_CORRUPTED = 'XX002'
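# Hedged usage sketch (added for illustration; not part of the upstream
# module): lookup() resolves either a full five-character SQLSTATE or its
# two-character class prefix to the constant name defined above, and raises
# KeyError for codes it does not know about.
if __name__ == "__main__":
    assert lookup(UNIQUE_VIOLATION) == 'UNIQUE_VIOLATION'  # full code '23505'
    assert lookup(UNIQUE_VIOLATION[:2]) == 'CLASS_INTEGRITY_CONSTRAINT_VIOLATION'
    try:
        lookup('99999')  # not a defined SQLSTATE
    except KeyError:
        print("'99999' is not a known error code")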
| agpl-3.0 | -9,134,851,522,338,565,000 | 30.538095 | 73 | 0.75185 | false |
samuelhavron/heroku-buildpack-python | Python-3.4.3/Lib/test/test_itertools.py | 8 | 89289 | import unittest
from test import support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
import sys
import struct
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
return list(zip(*args))
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
def tupleize(*args):
return args
def irange(n):
for i in range(n):
yield i
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def take(n, seq):
'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
# root level methods for pickling ability
def testR(r):
return r[0]
def testR2(r):
return r[2]
def underten(x):
return x<10
picklecopiers = [lambda s, proto=proto: pickle.loads(pickle.dumps(s, proto))
for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
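# Illustrative note (added for clarity, not part of the original test suite):
# the proto=proto default argument freezes each protocol number at
# lambda-creation time, so every entry in picklecopiers copies an iterator
# through one specific pickle protocol. For example, a partially consumed
# count() survives the round trip:
#
#     c = count(5)
#     next(c)                      # c will next yield 6
#     c2 = picklecopiers[0](c)     # pickle/unpickle with protocol 0
#     assert take(3, c2) == [6, 7, 8]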
class TestBasicOps(unittest.TestCase):
def pickletest(self, protocol, it, stop=4, take=1, compare=None):
"""Test that an iterator is the same after pickling, also when part-consumed"""
def expand(it, i=0):
# Recursively expand iterables, within sensible bounds
if i > 10:
raise RuntimeError("infinite recursion encountered")
if isinstance(it, str):
return it
try:
l = list(islice(it, stop))
except TypeError:
return it # can't expand it
return [expand(e, i+1) for e in l]
# Test the initial copy against the original
dump = pickle.dumps(it, protocol)
i2 = pickle.loads(dump)
self.assertEqual(type(it), type(i2))
a, b = expand(it), expand(i2)
self.assertEqual(a, b)
if compare:
c = expand(compare)
self.assertEqual(a, c)
# Take from the copy, and create another copy and compare them.
i3 = pickle.loads(dump)
took = 0
try:
for i in range(take):
next(i3)
took += 1
except StopIteration:
pass #in case there is less data than 'take'
dump = pickle.dumps(i3, protocol)
i4 = pickle.loads(dump)
a, b = expand(i3), expand(i4)
self.assertEqual(a, b)
if compare:
c = expand(compare[took:])
self.assertEqual(a, c);
def test_accumulate(self):
self.assertEqual(list(accumulate(range(10))), # one positional arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
self.assertEqual(list(accumulate(iterable=range(10))), # kw arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
for typ in int, complex, Decimal, Fraction: # multiple types
self.assertEqual(
list(accumulate(map(typ, range(10)))),
list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric
self.assertEqual(list(accumulate([])), []) # empty iterable
self.assertEqual(list(accumulate([7])), [7]) # iterable of length one
self.assertRaises(TypeError, accumulate, range(10), 5, 6) # too many args
self.assertRaises(TypeError, accumulate) # too few args
self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg
self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add
s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6]
self.assertEqual(list(accumulate(s, min)),
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0])
self.assertEqual(list(accumulate(s, max)),
[2, 8, 9, 9, 9, 9, 9, 9, 9, 9])
self.assertEqual(list(accumulate(s, operator.mul)),
[2, 16, 144, 720, 5040, 0, 0, 0, 0, 0])
with self.assertRaises(TypeError):
list(accumulate(s, chr)) # unary-operation
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, accumulate(range(10))) # test pickling
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_chain_reducible(self):
for oper in [copy.deepcopy] + picklecopiers:
it = chain('abc', 'def')
self.assertEqual(list(oper(it)), list('abcdef'))
self.assertEqual(next(it), 'a')
self.assertEqual(list(oper(it)), list('bcdef'))
self.assertEqual(list(oper(chain(''))), [])
self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd'))
self.assertRaises(TypeError, list, oper(chain(2, 3)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, chain('abc', 'def'), compare=list('abcdef'))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(combinations('abc', 32))), []) # r > n
self.assertEqual(list(op(combinations('ABCD', 2))),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
testIntermediate = combinations('ABCD', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(op(combinations(range(4), 3))),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
testIntermediate = combinations(range(4), 3)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[(0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, combinations(values, r)) # test pickling
@support.bigaddrspacetest
def test_combinations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations("AA", 2**29)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(cwr('ABC', 2))),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
testIntermediate = cwr('ABC', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) / fact(r)/ fact(n-1)
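# Worked instance of the count formula above (added for clarity, not part of
# the original test): for n=3, r=2 it gives (3+2-1)! / 2! / (3-1)! =
# 24 / 2 / 2 = 6, matching the six tuples that cwr('ABC', 2) produces in the
# assertions at the top of this test.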
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cwr(values,r)) # test pickling
@support.bigaddrspacetest
def test_combinations_with_replacement_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations_with_replacement("AA", 2**30)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = list(range(n))
cycles = list(range(n-r+1, n+1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, permutations(values, r)) # test pickling
@support.bigaddrspacetest
def test_permutations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
permutations("A", 2**30)
@support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr
self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
self.assertRaises(TypeError, compress, range(6), None) # too many args
# check copy, deepcopy, pickle
for op in [lambda a:copy.copy(a), lambda a:copy.deepcopy(a)] + picklecopiers:
for data, selectors, result1, result2 in [
('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'),
('ABCDEF', [0,0,0,0,0,0], '', ''),
('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'),
('ABCDEF', [1,0,1], 'AC', 'C'),
('ABC', [0,1,1,1,1,1], 'BC', 'C'),
]:
self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1))
self.assertEqual(list(op(compress(data, selectors))), list(result1))
testIntermediate = compress(data, selectors)
if result1:
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)), list(result2))
def test_count(self):
self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(list(islice(count(maxsize-5), 10)),
list(range(maxsize-5, maxsize+5)))
self.assertEqual(list(islice(count(-maxsize-5), 10)),
list(range(-maxsize-5, -maxsize+5)))
self.assertEqual(list(islice(count(10, maxsize+5), 3)),
list(range(10, 10+3*(maxsize+5), maxsize+5)))
c = count(3)
self.assertEqual(repr(c), 'count(3)')
next(c)
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
next(c)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(next(c), -8)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i))
r2 = 'count(%r)'.__mod__(i)
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, maxsize-5, maxsize+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(value))
# check proper internal error handling for large 'step' sizes
count(1, maxsize+5); sys.exc_info()
def test_count_with_stride(self):
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
next(c)
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
next(c)
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
next(c)
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i, j))
if j == 1:
r2 = ('count(%r)' % i)
else:
r2 = ('count(%r, %r)' % (i, j))
self.assertEqual(r1, r2)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(i, j))
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
# check copy, deepcopy, pickle
c = cycle('abc')
self.assertEqual(next(c), 'a')
#simple copy currently not supported, because __reduce__ returns
#an internal iterator
#self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab'))
self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('bcabcabcab'))
next(c)
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('cabcabcabc'))
next(c)
next(c)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cycle('abc'))
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check normal pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, testR):
for ik, ig in groupby(g, testR2):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested and pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2), proto)):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, testR)]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.__next__ failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.__next__ failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.__next__ failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __cmp__ failure
class DummyCmp:
def __eq__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __eq__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __eq__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_filter(self):
self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, filter)
self.assertRaises(TypeError, filter, lambda x:x)
self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filter, isEven, 3)
self.assertRaises(TypeError, next, filter(range(6), range(6)))
# check copy, deepcopy, pickle
ans = [0,2,4]
c = filter(isEven, range(6))
self.assertEqual(list(copy.copy(c)), ans)
c = filter(isEven, range(6))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans)
next(c)
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.pickletest(proto, c)
def test_filterfalse(self):
self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, filterfalse)
self.assertRaises(TypeError, filterfalse, lambda x:x)
self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filterfalse, isEven, 3)
self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, filterfalse(isEven, range(6)))
def test_zip(self):
# XXX This is rather silly now that builtin zip() calls zip()...
ans = [(x,y) for x, y in zip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
self.assertEqual(list(zip()), lzip())
self.assertRaises(TypeError, zip, 3)
self.assertRaises(TypeError, zip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
lzip('abc', 'def'))
self.assertEqual([pair for pair in zip('abc', 'def')],
lzip('abc', 'def'))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_tuple_reuse(self):
ids = list(map(id, zip('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
# check copy, deepcopy, pickle
ans = [(x,y) for x, y in copy.copy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count()), proto))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
testIntermediate = zip('abc',count())
next(testIntermediate)
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate, proto))]
self.assertEqual(ans, [('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip('abc', count()))
def test_ziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(zip_longest(*args)), target)
self.assertEqual(list(zip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input
self.assertEqual(list(zip_longest()), list(zip()))
self.assertEqual(list(zip_longest([])), list(zip([])))
self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))
self.assertEqual(list(zip_longest('abc', 'defg', **{})),
list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
self.assertRaises(TypeError, zip_longest, 3)
self.assertRaises(TypeError, zip_longest, range(3), 3)
for stmt in [
"zip_longest('abc', fv=1)",
"zip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
self.assertEqual([pair for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_longest_tuple_reuse(self):
ids = list(map(id, zip_longest('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip_longest('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_zip_longest_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip_longest("abc", "def"))
self.pickletest(proto, zip_longest("abc", "defgh"))
self.pickletest(proto, zip_longest("abc", "defgh", fillvalue=1))
self.pickletest(proto, zip_longest("", "defgh"))
def test_bug_7244(self):
class Repeater:
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def __next__(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
# Formerly this code would fail in debug mode
# with an undetected error and StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in zip_longest(r1, r2, fillvalue=0):
with support.captured_output('stdout'):
print((i, j))
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost
# and StopIteration would stop as expected
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = zip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@support.bigaddrspacetest
def test_product_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
product(*(['ab']*2**5), repeat=2**25)
@support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_product_pickling(self):
# check copy, deepcopy, pickle
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(copy.copy(product(*args))), result)
self.assertEqual(list(copy.deepcopy(product(*args))), result)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, product(*args))
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(lzip(range(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
# check copy, deepcopy, pickle
c = repeat(object='a', times=10)
self.assertEqual(next(c), 'a')
self.assertEqual(take(2, copy.copy(c)), list('a' * 2))
self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, repeat(object='a', times=10))
def test_repeat_with_negative_times(self):
self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
def test_map(self):
self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(map(tupleize, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(map(tupleize, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,map(tupleize, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(map(operator.pow, [])), [])
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, list, map(None, range(3), range(3)))
self.assertRaises(TypeError, map, operator.neg)
self.assertRaises(TypeError, next, map(10, range(5)))
self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
self.assertRaises(TypeError, next, map(onearg, [4], [5]))
# check copy, deepcopy, pickle
ans = [('a',0),('b',1),('c',2)]
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.copy(c)), ans)
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = map(tupleize, 'abc', count())
self.pickletest(proto, c)
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
# check copy, deepcopy, pickle
ans = [0**1, 1**2, 2**3]
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.copy(c)), ans)
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.pickletest(proto, c)
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*args)))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*tgtargs)))
# Test stop=None
self.assertEqual(list(islice(range(10), None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
# Test invalid arguments
ra = range(10)
self.assertRaises(TypeError, islice, ra)
self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4)
self.assertRaises(ValueError, islice, ra, -5, 10, 1)
self.assertRaises(ValueError, islice, ra, 1, -5, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, 0)
self.assertRaises(ValueError, islice, ra, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1)
self.assertRaises(ValueError, islice, ra, 1, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1, 1)
self.assertRaises(ValueError, islice, ra, 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
# Issue #10323: Leave islice in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
# check copy, deepcopy, pickle
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(copy.copy(islice(range(100), *args))),
list(range(*args)))
self.assertEqual(list(copy.deepcopy(islice(range(100), *args))),
list(range(*args)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, islice(range(100), *args))
# Issue #21321: check source iterator is not referenced
# from islice() after the latter has been exhausted
it = (x for x in (1, 2))
wr = weakref.ref(it)
it = islice(it, 1)
self.assertIsNotNone(wr())
list(it) # exhaust the iterator
support.gc_collect()
self.assertIsNone(wr())
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, next, t)
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5])
self.assertEqual(list(copy.deepcopy(takewhile(underten, data))),
[1, 3, 5])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, takewhile(underten, data))
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8])
self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))),
[20, 2, 4, 6, 8])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, dropwhile(underten, data))
def test_tee(self):
n = 200
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(lzip(a,b), lzip(range(n), range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), list(range(n)))
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in range(100):
self.assertEqual(next(a), i)
del a
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in range(100):
self.assertEqual(next(a), i)
del b
self.assertEqual(list(a), list(range(100, n)))
for j in range(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = next(its[i])
lists[i].append(value)
self.assertEqual(lists[0], list(range(n)))
self.assertEqual(lists[1], list(range(n)))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(range(2000), 3)
for i in range(100):
self.assertEqual(next(a), i)
self.assertEqual(list(b), list(range(2000)))
self.assertEqual([next(c), next(c)], list(range(2)))
self.assertEqual(list(a), list(range(100,2000)))
self.assertEqual(list(c), list(range(2,2000)))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in range(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual([list(x) for x in result], [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(range(10))
p = weakref.proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
ans = list('abc')
long_ans = list(range(10000))
# check copy
a, b = tee('abc')
self.assertEqual(list(copy.copy(a)), ans)
self.assertEqual(list(copy.copy(b)), ans)
a, b = tee(list(range(10000)))
self.assertEqual(list(copy.copy(a)), long_ans)
self.assertEqual(list(copy.copy(b)), long_ans)
# check partially consumed copy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.copy(a)), ans[2:])
self.assertEqual(list(copy.copy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.copy(a)), long_ans[100:])
self.assertEqual(list(copy.copy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check deepcopy
a, b = tee('abc')
self.assertEqual(list(copy.deepcopy(a)), ans)
self.assertEqual(list(copy.deepcopy(b)), ans)
self.assertEqual(list(a), ans)
self.assertEqual(list(b), ans)
a, b = tee(range(10000))
self.assertEqual(list(copy.deepcopy(a)), long_ans)
self.assertEqual(list(copy.deepcopy(b)), long_ans)
self.assertEqual(list(a), long_ans)
self.assertEqual(list(b), long_ans)
# check partially consumed deepcopy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.deepcopy(a)), ans[2:])
self.assertEqual(list(copy.deepcopy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.deepcopy(a)), long_ans[100:])
self.assertEqual(list(copy.deepcopy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check pickle
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, iter(tee('abc')))
a, b = tee('abc')
self.pickletest(proto, a, compare=ans)
self.pickletest(proto, b, compare=ans)
# Issue 13454: Crash when deleting backward iterator from tee()
def test_tee_del_backward(self):
forward, backward = tee(repeat(None, 20000000))
any(forward) # exhaust the iterator
del backward
def test_StopIteration(self):
self.assertRaises(StopIteration, next, zip())
for f in (chain, cycle, zip, groupby):
self.assertRaises(StopIteration, next, f([]))
self.assertRaises(StopIteration, next, f(StopNow()))
self.assertRaises(StopIteration, next, islice([], None))
self.assertRaises(StopIteration, next, islice(StopNow(), None))
p, q = tee([])
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
p, q = tee(StopNow())
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
self.assertRaises(StopIteration, next, repeat(None, 0))
for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, next, f(lambda x:x, []))
self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
class TestExamples(unittest.TestCase):
def test_accumulate(self):
self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])
def test_accumulate_reducible(self):
# check copy, deepcopy, pickle
data = [1, 2, 3, 4, 5]
accumulated = [1, 3, 6, 10, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = accumulate(data)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[:])
self.assertEqual(next(it), 1)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[1:])
it = accumulate(data)
self.assertEqual(next(it), 1)
self.assertEqual(list(copy.deepcopy(it)), accumulated[1:])
self.assertEqual(list(copy.copy(it)), accumulated[1:])
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_filter(self):
self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_filterfalse(self):
self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_map(self):
self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_zip(self):
self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_zip_longest(self):
self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
next(iterator)
del container, iterator
def test_accumulate(self):
a = []
self.makecycle(accumulate([1,2,a,3]), a)
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(range(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_filter(self):
a = []
self.makecycle(filter(lambda x:True, [a]*2), a)
def test_filterfalse(self):
a = []
self.makecycle(filterfalse(lambda x:False, a), a)
def test_zip(self):
a = []
self.makecycle(zip([a]*2, [a]*3), a)
def test_zip_longest(self):
a = []
self.makecycle(zip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_map(self):
a = []
self.makecycle(map(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
self.assertEqual(operator.length_hint(repeat(None, 50)), 50)
self.assertEqual(operator.length_hint(repeat(None, 0)), 0)
self.assertEqual(operator.length_hint(repeat(None), 12), 12)
def test_repeat_with_negative_times(self):
self.assertEqual(operator.length_hint(repeat(None, -1)), 0)
self.assertEqual(operator.length_hint(repeat(None, -2)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-1)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-2)), 0)
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(next(z))
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = map(g, items)
z = zip(*[gen]*len(tuple1))
next(z)
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, zip, filter, filterfalse, chain, map,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError as err:
# we expect type errors because of wrong argument count
self.assertNotIn("does not take keyword arguments", err.args[0])
@support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.ssize_t = struct.calcsize('n')
check_sizeof = support.check_sizeof
def test_product_sizeof(self):
basesize = support.calcobjsize('3Pi')
check = self.check_sizeof
check(product('ab', '12'), basesize + 2 * self.ssize_t)
check(product(*(('abc',) * 10)), basesize + 10 * self.ssize_t)
def test_combinations_sizeof(self):
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(combinations('abcd', 3), basesize + 3 * self.ssize_t)
check(combinations(range(10), 4), basesize + 4 * self.ssize_t)
def test_combinations_with_replacement_sizeof(self):
cwr = combinations_with_replacement
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(cwr('abcd', 3), basesize + 3 * self.ssize_t)
check(cwr(range(10), 4), basesize + 4 * self.ssize_t)
def test_permutations_sizeof(self):
basesize = support.calcobjsize('4Pni')
check = self.check_sizeof
check(permutations('abcd'),
basesize + 4 * self.ssize_t + 4 * self.ssize_t)
check(permutations('abcd', 3),
basesize + 4 * self.ssize_t + 3 * self.ssize_t)
check(permutations('abcde', 3),
basesize + 5 * self.ssize_t + 3 * self.ssize_t)
check(permutations(range(10), 4),
basesize + 10 * self.ssize_t + 4 * self.ssize_t)
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# the same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... try:
... next(b)
... except StopIteration:
... pass
... return zip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
>>> def first_true(iterable, default=False, pred=None):
... '''Returns the first true value in the iterable.
...
... If no true value is found, returns *default*
...
... If *pred* is not None, returns the first item
... for which pred(item) is true.
...
... '''
... # first_true([a,b,c], x) --> a or b or c or x
... # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
... return next(filter(pred, iterable), default)
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
>>> first_true('ABC0DEF1', '9', str.isdigit)
'0'
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples,
SizeofTest)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
# doctest the examples in the library reference
support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
| mit | -6,215,571,929,767,405,000 | 39.67836 | 124 | 0.537625 | false |
elmgone/coligui | vendor.etc/json-to-elm/generate.py | 5 | 3247 | from __future__ import print_function, unicode_literals
import json
import sys
from type_alias import create_type_alias, find_type_aliases, find_union_types
from decoder import create_decoder, create_encoder, create_union_type_decoder, create_union_type_encoder
from helpers import *
def test():
testJson = """
{ "name" : "Noah"
, "age" : 23
, "location" :
{ "name" : "sweden"
, "days" : 45
}
}
"""
stuff = json.loads(testJson)
print('Creating type alias')
aliases = create_type_alias(stuff, type_alias_name='Assignment')
print('\n'.join(aliases))
print('Creating decoder')
decoders = []
for alias in aliases:
decoder = create_decoder(alias, has_snakecase=True, prefix='decode')
decoders.append(decoder)
print('\n'.join(decoders))
print_everything(
"""
{ "name" : "Noah"
, "age" : 23
, "location" :
{ "name" : "sweden"
, "days" : 45
}
}
"""
, alias_name = "Person")
print(create_union_type_decoder('type Action = Run | Hide | Noop'))
print(create_union_type_encoder('type Action = Run | Hide | Noop'))
def print_everything(some_json, alias_name):
stuff = json.loads(some_json)
aliases = create_type_alias(stuff, type_alias_name=alias_name)
decoders = [create_decoder(alias, has_snakecase=True, prefix='decode') for alias in aliases ]
encoders = [create_encoder(alias, has_snakecase=True, prefix='encode') for alias in aliases ]
print('\n'.join(aliases))
print('\n'.join(decoders))
print('\n'.join(encoders))
def from_elm_file(file_text):
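    # Scan an Elm source file for type alias and union type declarations,
    # then print a JSON decoder and encoder for each one found.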
aliases = find_type_aliases(file_text)
unions = find_union_types(file_text)
decoders = [create_decoder(alias, has_snakecase=True, prefix='decode') for alias in aliases ]
decoders.extend(create_union_type_decoder(union_type) for union_type in unions)
encoders = [create_encoder(alias, has_snakecase=True, prefix='encode') for alias in aliases ]
encoders.extend(create_union_type_encoder(union_type) for union_type in unions)
print('\n'.join(decoders))
print('\n'.join(encoders))
text ="""
type Action =
Run | Hide
type alias Location =
{ name : String
, days : Int
}
type alias Person =
{ age : Int
, name : String
, location : Location
}
location =
{ name = "Noah"
, days = 45
}
view =
div [] []
"""
def message(*args):
print(*args, file=sys.stderr)
def main():
if len(sys.argv) < 2:
message('Give me some elm file names and I\'ll give you decoders and encoders')
message('Or give me some json files and I\'ll give you a type alias and decoders and encoders')
return
for arg in sys.argv[1:]:
if arg.endswith('.elm'):
message('Generating decoders and encoders for {name}'.format(name=arg))
with open(arg) as f:
from_elm_file(f.read())
if arg.endswith('.json'):
message('Generating type alias, decoders and encoders from {name}'.format(name=arg))
with open(arg) as f:
arg = arg.split('/')[-1]
name = arg.split('.')[0].capitalize()
print_everything(f.read(), name)
if __name__ == '__main__':
main()
| apache-2.0 | -5,751,564,635,275,268,000 | 23.78626 | 104 | 0.615029 | false |
chirilo/kuma | vendor/packages/setuptools/command/test.py | 363 | 6471 | from distutils.errors import DistutilsOptionError
from unittest import TestLoader
import unittest
import sys
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.compat import PY3
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0, '--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
def with_project_on_sys_path(self, func):
with_2to3 = PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_args[-1].split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
unittest_main(
None, None, [unittest.__file__] + self.test_args,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
)
@staticmethod
def _resolve_as_ep(val):
"""
        Load the indicated attribute value as if it were specified as an
        entry point, then call the resolved object.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
| mpl-2.0 | -689,609,350,898,940,000 | 35.977143 | 79 | 0.5673 | false |
uchida/selenium | py/selenium/webdriver/ie/service.py | 8 | 2235 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import service
class Service(service.Service):
"""
Object that manages the starting and stopping of the IEDriver
"""
def __init__(self, executable_path, port=0, host=None, log_level=None, log_file=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to the IEDriver
- port : Port the service is running on
- host : IP address the service port is bound
- log_level : Level of logging of service, may be "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE".
Default is "FATAL".
- log_file : Target of logging of service, may be "stdout", "stderr" or file path.
Default is "stdout"."""
self.service_args = []
if host is not None:
self.service_args.append("--host=%s" % host)
if log_level is not None:
self.service_args.append("--log-level=%s" % log_level)
if log_file is not None:
self.service_args.append("--log-file=%s" % log_file)
service.Service.__init__(self, executable_path, port=port,
start_error_message="Please download from http://selenium-release.storage.googleapis.com/index.html and read up at https://github.com/SeleniumHQ/selenium/wiki/InternetExplorerDriver")
def command_line_args(self):
return ["--port=%d" % self.port] + self.service_args
| apache-2.0 | -3,066,655,136,672,969,000 | 43.7 | 216 | 0.667114 | false |
Adjective-Object/ligament | ligament/__init__.py | 1 | 2250 | """
A python task automator, with a focus on task composition
"""
import ligament
import helpers
import buildtarget
import buildcontext
import buildcontextfseventhandler
import exceptions
import sys
import getopt
from helpers import pout
def print_helptext():
""" print helptext for command-line interface """
pout("usage: %s [flags] [tasks]\n"
" Execute tasks in order. if no tasks are specified, tries to\n"
" execute task 'default'\n"
" flags:\n"
" -w / --watch watch files/folders for changes and update\n"
" -h / --help display this helptext" % sys.argv[0])
def main():
""" parse command line opts and run a skeleton file
when called from the command line, ligament looks in the current
working directory for a file called `skeleton.py`. Tasks specified from
the command line are then executed in order, and if the -w flag was
specified, ligament then watches the filesystem for changes to
prompt task re-execution;
"""
options = None
try:
options, args = getopt.gnu_getopt(
sys.argv[1:],
"whqv",
["watch", "help", "query", "verbose"])
except getopt.GetoptError as e:
print e
print_helptext()
exit(1)
should_watch = False
query_skeleton = False
verbose = False
for opt, arg in options:
if opt == "--watch" or opt == '-w':
should_watch = True
elif opt == "--query" or opt == '-q':
query_skeleton = True
elif opt == "--help" or opt == '-h':
print_helptext()
exit(0)
elif opt == "--verbose" or opt == '-v':
verbose = True
else:
print "option '%s' not recognized" % opt
print_helptext()
exit(1)
if verbose:
helpers.set_verbosity(".*")
if query_skeleton:
print " ".join(ligament.query_skeleton("./skeleton.py"))
helpers.set_verbosity()
else:
helpers.add_verbosity_groups("build_task")
ligament.run_skeleton(
"./skeleton.py",
["default"] if len(args) == 0 else args,
watch=should_watch)
| apache-2.0 | 2,881,781,953,091,548,700 | 27.846154 | 79 | 0.569778 | false |
edx/edx-load-tests | loadtests/ecommerce_deadlock/authentication.py | 1 | 2577 | import uuid
from locust import TaskSet
from helpers import settings
LMS_HOST = settings.data['LMS_HOST'].rstrip('\/')
class AuthenticationMixin(TaskSet):
"""Mixin that gives tasks different authentication methods."""
def __init__(self, parent):
super().__init__(parent)
self._setup_basic_auth()
def on_start(self):
"""Raise an exception. This TaskSet is not meant to be run on its own."""
raise Exception("This TaskSet is not meant to be run")
def auto_auth_login(self):
"""Create a new account with given credentials and log in.
This requires that the test server has the feature flag
`AUTOMATIC_AUTH_FOR_TESTING` set to True.
"""
auto_auth_username = "ecom_deadlock"
auto_auth_email_url = "simulator.amazonses.com"
default_password = "test"
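        # Drop any existing session cookie so the auto-auth request creates a fresh user session.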
del self.client.cookies["sessionid"]
username = "{0}{1}".format(auto_auth_username, uuid.uuid4().hex[:20])
full_email = "{0}@{1}".format(username, auto_auth_email_url)
params = {
'password': default_password,
'email': full_email,
'username': username,
}
self.client.get("/auto_auth", params=params, verify=False, name="/auto_auth")
def login(self, email, password):
"""
Login to the LMS for the given credentials
Args:
email (str): User email
password (str): User's password
"""
signin_url = '{}/login'.format(LMS_HOST)
headers = self._get_csrf(signin_url)
login_url = '{}/login_ajax'.format(LMS_HOST)
response = self.client.post(login_url, {
'email': email,
'password': password,
'honor_code': 'true'
}, headers=headers).json()
if not response['success']:
raise Exception(str(response))
print('Login successful {}'.format(email))
def _get_csrf(self, url):
try:
response = self.client.get(url)
csrf = response.cookies['csrftoken']
return {'X-CSRFToken': csrf, 'Referer': url}
except Exception as error: # pylint: disable=W0703
print("Error when retrieving csrf token.", error)
def _setup_basic_auth(self):
"""Sets up basic authentication if available"""
if settings.data.get('BASIC_AUTH') is not None:
self.client.auth = (
settings.data['BASIC_AUTH']['BASIC_AUTH_USER'],
settings.data['BASIC_AUTH']['BASIC_AUTH_PASS'],
)
| apache-2.0 | 4,022,099,700,658,425,000 | 32.467532 | 85 | 0.578968 | false |
henriquejensen/marolo | controllers/default.py | 2 | 3682 | # -*- coding: utf-8 -*-
def index():
pagina = request.args(0, cast=int, default=1)
itens_por_pagina = 5
total = db(db.noticias).count()
paginas = total / itens_por_pagina
if total % itens_por_pagina:
paginas += 1
limites = (itens_por_pagina * (pagina - 1), (itens_por_pagina * pagina))
noticias = db(db.noticias.status == "publicado").select(
orderby=~db.noticias.created_on | ~db.noticias.modified_on,
limitby=limites
)
carousel = db(db.carousel.status == 'ativo').select(
orderby=~db.carousel.id,
limitby=(0, 4),
)
return {
'noticias': noticias,
'pagina': pagina,
'paginas': paginas,
'carousel': carousel
}
def noticias():
permalink = request.args(0, otherwise='/')
noticia = db.noticias(permalink=permalink)
if not noticia:
session.flash = T('Desculpe, não existem noticias cadastradas ainda')
redirect('/')
return dict(noticia=noticia)
def membros():
membros = db(db.membros).select().as_list()
if not membros:
session.flash = T('Desculpe, não existem membros cadastrados ainda')
redirect('/')
linhas = (len(membros) // 3) + 1
matriz = []
for _ in range(linhas):
aux = []
cont = 0
while cont < 3 and membros:
aux.append(membros.pop(0))
cont += 1
matriz.append(aux)
return {
'matriz': matriz
}
def associacao():
return {}
def projeto():
return {}
def eventos():
parametro_ano = request.args(0, cast=int, default=request.now.year)
ano_atual = request.now.year
eventos = db(db.eventos.dia.year() == parametro_ano).select()
if not eventos:
session.flash = T('Desculpe, não existem eventos cadastrados ainda')
redirect('/')
minimo = db.eventos.dia.min()
maximo = db.eventos.dia.max()
extremos = db().select(maximo, minimo).first()
menor_ano = extremos[minimo].year
maior_ano = extremos[maximo].year
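    # Build the year navigation: up to three later years (bounded by the latest
    # event year or the current year) and up to three earlier years (bounded by
    # the earliest event year).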
anos_anteriores = []
anos_posteriores = []
for incremento in range(3, 0, -1):
ano_posterior = parametro_ano + incremento
if (ano_posterior <= maior_ano) or (ano_posterior <= ano_atual):
anos_posteriores.append(ano_posterior)
for subtrator in range(1, 4):
ano_anterior = parametro_ano - subtrator
if ano_anterior >= menor_ano:
anos_anteriores.append(ano_anterior)
return {
'eventos': eventos,
'ano': parametro_ano,
'anos_anteriores': anos_anteriores,
'anos_posteriores': anos_posteriores,
}
def contato():
    # The original form definition is missing from this file; the fields below
    # are an assumption based on the values used further down (email, assunto,
    # mensagem), using web2py's SQLFORM.factory helper.
    form = SQLFORM.factory(
        Field('email', requires=IS_EMAIL()),
        Field('assunto', requires=IS_NOT_EMPTY()),
        Field('mensagem', 'text', requires=IS_NOT_EMPTY()),
    )
    if form.validate():
        campos = form.vars
        mail.send(
            to=mail.settings.sender,
            subject=campos['assunto'],
            message=campos['mensagem'],
            sender=campos['email']
        )
    return dict(form=form)
def produtos():
pagina = request.args(0, cast=int, default=1)
itens_por_pagina = 9
total = db(db.produtos).count()
if not total:
session.flash = T('Desculpe, não existem produtos cadastrados ainda')
redirect('/')
paginas = total / itens_por_pagina
if total % itens_por_pagina:
paginas += 1
limites = (itens_por_pagina * (pagina - 1), (itens_por_pagina * pagina))
produtos = db(db.produtos).select(
limitby=limites
)
return {
'produtos': produtos,
'pagina': pagina,
'paginas': paginas
}
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
| mit | -7,240,190,339,175,269,000 | 26.654135 | 77 | 0.587819 | false |
CloudWareChile/OpenChile | openerp/addons/account_analytic_plans/account_analytic_plans.py | 6 | 24178 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from osv import fields, osv
import tools
from tools.translate import _
class one2many_mod2(fields.one2many):
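    """one2many variant that, when a journal is passed in the context,
    only returns lines whose analytic account is a child of the root
    account configured on the matching plan line of that journal's
    analytic plan."""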
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if context is None:
context = {}
res = {}
for id in ids:
res[id] = []
ids2 = None
if 'journal_id' in context:
journal = obj.pool.get('account.journal').browse(cr, user, context['journal_id'], context=context)
pnum = int(name[7]) -1
plan = journal.plan_id
if plan and len(plan.plan_ids) > pnum:
acc_id = plan.plan_ids[pnum].root_analytic_id.id
ids2 = obj.pool.get(self._obj).search(cr, user, [(self._fields_id,'in',ids),('analytic_account_id','child_of',[acc_id])], limit=self._limit)
if ids2 is None:
ids2 = obj.pool.get(self._obj).search(cr, user, [(self._fields_id,'in',ids)], limit=self._limit)
for r in obj.pool.get(self._obj)._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
res[r[self._fields_id]].append( r['id'] )
return res
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_description = 'Analytic Line'
def _get_amount(self, cr, uid, ids, name, args, context=None):
res = {}
for id in ids:
res.setdefault(id, 0.0)
for line in self.browse(cr, uid, ids, context=context):
amount = line.move_id and line.move_id.amount_currency * (line.percentage / 100) or 0.0
res[line.id] = amount
return res
_columns = {
'amount_currency': fields.function(_get_amount, string="Amount Currency", type="float", store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
'percentage': fields.float('Percentage')
}
account_analytic_line()
class account_analytic_plan(osv.osv):
_name = "account.analytic.plan"
_description = "Analytic Plan"
_columns = {
'name': fields.char('Analytic Plan', size=64, required=True, select=True),
'plan_ids': fields.one2many('account.analytic.plan.line', 'plan_id', 'Analytic Plans'),
}
account_analytic_plan()
class account_analytic_plan_line(osv.osv):
_name = "account.analytic.plan.line"
_description = "Analytic Plan Line"
_order = "sequence, id"
_columns = {
'plan_id': fields.many2one('account.analytic.plan','Analytic Plan'),
'name': fields.char('Plan Name', size=64, required=True, select=True),
'sequence': fields.integer('Sequence'),
'root_analytic_id': fields.many2one('account.analytic.account', 'Root Account', help="Root account of this plan.", required=False),
'min_required': fields.float('Minimum Allowed (%)'),
'max_required': fields.float('Maximum Allowed (%)'),
}
_defaults = {
'min_required': 100.0,
'max_required': 100.0,
}
account_analytic_plan_line()
class account_analytic_plan_instance(osv.osv):
_name = "account.analytic.plan.instance"
_description = "Analytic Plan Instance"
_columns = {
'name': fields.char('Analytic Distribution', size=64),
'code': fields.char('Distribution Code', size=16),
'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal' ),
'account_ids': fields.one2many('account.analytic.plan.instance.line', 'plan_id', 'Account Id'),
'account1_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account1 Id'),
'account2_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account2 Id'),
'account3_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account3 Id'),
'account4_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account4 Id'),
'account5_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account5 Id'),
'account6_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account6 Id'),
'plan_id': fields.many2one('account.analytic.plan', "Model's Plan"),
}
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
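        # When invoked from an account journal's context, restrict the result
        # to distributions linked to that journal's analytic journal or to no
        # analytic journal at all.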
if context.get('journal_id', False):
journal = journal_obj.browse(cr, user, [context['journal_id']], context=context)[0]
analytic_journal = journal.analytic_journal_id and journal.analytic_journal_id.id or False
args.append('|')
args.append(('journal_id', '=', analytic_journal))
args.append(('journal_id', '=', False))
res = super(account_analytic_plan_instance, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
return res
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'account1_ids':False, 'account2_ids':False, 'account3_ids':False,
'account4_ids':False, 'account5_ids':False, 'account6_ids':False})
return super(account_analytic_plan_instance, self).copy(cr, uid, id, default, context=context)
def _default_journal(self, cr, uid, context=None):
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
if context.has_key('journal_id') and context['journal_id']:
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
if journal.analytic_journal_id:
return journal.analytic_journal_id.id
return False
_defaults = {
'plan_id': False,
'journal_id': _default_journal,
}
def name_get(self, cr, uid, ids, context=None):
res = []
for inst in self.browse(cr, uid, ids, context=context):
name = inst.name or '/'
if name and inst.code:
name=name+' ('+inst.code+')'
res.append((inst.id, name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
args = args or []
if name:
ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context or {})
if not ids:
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context or {})
else:
ids = self.search(cr, uid, args, limit=limit, context=context or {})
return self.name_get(cr, uid, ids, context or {})
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
wiz_id = self.pool.get('ir.actions.act_window').search(cr, uid, [("name","=","analytic.plan.create.model.action")], context=context)
res = super(account_analytic_plan_instance,self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
journal_obj = self.pool.get('account.journal')
analytic_plan_obj = self.pool.get('account.analytic.plan')
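        # For form views, rebuild the view architecture dynamically: one
        # editable one2many list per plan line, each restricted to the
        # children of that line's root analytic account.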
if (res['type']=='form'):
plan_id = False
if context.get('journal_id', False):
plan_id = journal_obj.browse(cr, uid, int(context['journal_id']), context=context).plan_id
elif context.get('plan_id', False):
plan_id = analytic_plan_obj.browse(cr, uid, int(context['plan_id']), context=context)
if plan_id:
i=1
res['arch'] = """<form string="%s">
<field name="name"/>
<field name="code"/>
<field name="journal_id"/>
<button name="%d" string="Save This Distribution as a Model" type="action" colspan="2"/>
"""% (tools.to_xml(plan_id.name), wiz_id[0])
for line in plan_id.plan_ids:
res['arch']+="""
<field name="account%d_ids" string="%s" nolabel="1" colspan="4">
<tree string="%s" editable="bottom">
<field name="rate"/>
<field name="analytic_account_id" domain="[('parent_id','child_of',[%d])]" groups="base.group_extended"/>
</tree>
</field>
<newline/>"""%(i,tools.to_xml(line.name),tools.to_xml(line.name),line.root_analytic_id and line.root_analytic_id.id or 0)
i+=1
res['arch'] += "</form>"
doc = etree.fromstring(res['arch'].encode('utf8'))
xarch, xfields = self._view_look_dom_arch(cr, uid, doc, view_id, context=context)
res['arch'] = xarch
res['fields'] = xfields
return res
else:
return res
def create(self, cr, uid, vals, context=None):
journal_obj = self.pool.get('account.journal')
ana_plan_instance_obj = self.pool.get('account.analytic.plan.instance')
acct_anal_acct = self.pool.get('account.analytic.account')
acct_anal_plan_line_obj = self.pool.get('account.analytic.plan.line')
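        # When created from a journal, refuse duplicate models and check that
        # the sum of the rates entered for each plan line stays within the
        # minimum/maximum percentages configured on that line.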
if context and 'journal_id' in context:
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
pids = ana_plan_instance_obj.search(cr, uid, [('name','=',vals['name']), ('code','=',vals['code']), ('plan_id','<>',False)], context=context)
if pids:
raise osv.except_osv(_('Error'), _('A model having this name and code already exists !'))
res = acct_anal_plan_line_obj.search(cr, uid, [('plan_id','=',journal.plan_id.id)], context=context)
for i in res:
total_per_plan = 0
item = acct_anal_plan_line_obj.browse(cr, uid, i, context=context)
temp_list = ['account1_ids','account2_ids','account3_ids','account4_ids','account5_ids','account6_ids']
for l in temp_list:
if vals.has_key(l):
for tempo in vals[l]:
if acct_anal_acct.search(cr, uid, [('parent_id', 'child_of', [item.root_analytic_id.id]), ('id', '=', tempo[2]['analytic_account_id'])], context=context):
total_per_plan += tempo[2]['rate']
if total_per_plan < item.min_required or total_per_plan > item.max_required:
raise osv.except_osv(_('Value Error'),_('The Total Should be Between %s and %s') % (str(item.min_required), str(item.max_required)))
return super(account_analytic_plan_instance, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context = {}
this = self.browse(cr, uid, ids[0], context=context)
invoice_line_obj = self.pool.get('account.invoice.line')
if this.plan_id and not vals.has_key('plan_id'):
            # This instance is a model, so we have to create a new plan
            # instance instead of modifying it.
            # Copy the existing model.
            temp_id = self.copy(cr, uid, this.id, None, context=context)
            # Get the list of the invoice lines that were linked to the model.
            lists = invoice_line_obj.search(cr, uid, [('analytics_id','=',this.id)], context=context)
            # Make them point to the copy.
            invoice_line_obj.write(cr, uid, lists, {'analytics_id':temp_id}, context=context)
            # Finally, modify the old model so it is no longer flagged as a model.
vals['plan_id'] = False
if not vals.has_key('name'):
vals['name'] = this.name and (str(this.name)+'*') or "*"
if not vals.has_key('code'):
vals['code'] = this.code and (str(this.code)+'*') or "*"
return super(account_analytic_plan_instance, self).write(cr, uid, ids, vals, context=context)
account_analytic_plan_instance()
class account_analytic_plan_instance_line(osv.osv):
_name = "account.analytic.plan.instance.line"
_description = "Analytic Instance Line"
_columns = {
'plan_id': fields.many2one('account.analytic.plan.instance', 'Plan Id'),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, domain=[('type','<>','view')]),
'rate': fields.float('Rate (%)', required=True),
}
_defaults = {
'rate': 100.0
}
def name_get(self, cr, uid, ids, context=None):
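        # Display each line by its linked analytic account (read() returns
        # the many2one field as an (id, name) pair).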
if not ids:
return []
reads = self.read(cr, uid, ids, ['analytic_account_id'], context=context)
res = []
for record in reads:
res.append((record['id'], record['analytic_account_id']))
return res
account_analytic_plan_instance_line()
class account_journal(osv.osv):
_inherit = "account.journal"
_name = "account.journal"
_columns = {
'plan_id': fields.many2one('account.analytic.plan', 'Analytic Plans'),
}
account_journal()
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_name = "account.invoice.line"
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
def create(self, cr, uid, vals, context=None):
if 'analytics_id' in vals and isinstance(vals['analytics_id'], tuple):
vals['analytics_id'] = vals['analytics_id'][0]
return super(account_invoice_line, self).create(cr, uid, vals, context=context)
def move_line_get_item(self, cr, uid, line, context=None):
res = super(account_invoice_line, self).move_line_get_item(cr, uid, line, context=context)
        res['analytics_id'] = line.analytics_id and line.analytics_id.id or False
return res
def product_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, address_invoice_id=False, currency_id=False, context=None, company_id=None):
res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, address_invoice_id, currency_id, context=context, company_id=company_id)
rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), context=context)
if rec and rec.analytics_id:
res_prod['value'].update({'analytics_id': rec.analytics_id.id})
return res_prod
account_invoice_line()
class account_move_line(osv.osv):
_inherit = "account.move.line"
_name = "account.move.line"
_columns = {
        'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
def _default_get_move_form_hook(self, cursor, user, data):
data = super(account_move_line, self)._default_get_move_form_hook(cursor, user, data)
if data.has_key('analytics_id'):
del(data['analytics_id'])
return data
def create_analytic_lines(self, cr, uid, ids, context=None):
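        # For move lines that carry an analytic distribution, drop the
        # analytic lines created by the standard method and re-create one
        # analytic line per distribution entry, split by its rate.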
if context is None:
context = {}
super(account_move_line, self).create_analytic_lines(cr, uid, ids, context=context)
analytic_line_obj = self.pool.get('account.analytic.line')
for line in self.browse(cr, uid, ids, context=context):
if line.analytics_id:
if not line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal !'),_("You have to define an analytic journal on the '%s' journal!") % (line.journal_id.name,))
toremove = analytic_line_obj.search(cr, uid, [('move_id','=',line.id)], context=context)
if toremove:
analytic_line_obj.unlink(cr, uid, toremove, context=context)
for line2 in line.analytics_id.account_ids:
val = (line.credit or 0.0) - (line.debit or 0.0)
                    amt = val * (line2.rate / 100)
                    al_vals = {
'name': line.name,
'date': line.date,
'account_id': line2.analytic_account_id.id,
'unit_amount': line.quantity,
'product_id': line.product_id and line.product_id.id or False,
'product_uom_id': line.product_uom_id and line.product_uom_id.id or False,
'amount': amt,
'general_account_id': line.account_id.id,
'move_id': line.id,
'journal_id': line.journal_id.analytic_journal_id.id,
'ref': line.ref,
'percentage': line2.rate
}
analytic_line_obj.create(cr, uid, al_vals, context=context)
return True
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
result = super(account_move_line, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
return result
account_move_line()
class account_invoice(osv.osv):
_name = "account.invoice"
_inherit = "account.invoice"
def line_get_convert(self, cr, uid, x, part, date, context=None):
        res = super(account_invoice,self).line_get_convert(cr, uid, x, part, date, context=context)
res['analytics_id'] = x.get('analytics_id', False)
return res
def _get_analytic_lines(self, cr, uid, id, context=None):
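        # Build the analytic lines for every invoice line that carries a
        # distribution, splitting the amount and quantity according to the
        # rate of each entry in the selected plan instance.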
inv = self.browse(cr, uid, [id])[0]
cur_obj = self.pool.get('res.currency')
invoice_line_obj = self.pool.get('account.invoice.line')
acct_ins_obj = self.pool.get('account.analytic.plan.instance')
company_currency = inv.company_id.currency_id.id
if inv.type in ('out_invoice', 'in_refund'):
sign = 1
else:
sign = -1
iml = invoice_line_obj.move_line_get(cr, uid, inv.id, context=context)
for il in iml:
if il.get('analytics_id', False):
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = self._convert_ref(cr, uid, inv.number)
obj_move_line = acct_ins_obj.browse(cr, uid, il['analytics_id'], context=context)
ctx = context.copy()
ctx.update({'date': inv.date_invoice})
amount_calc = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context=ctx) * sign
qty = il['quantity']
il['analytic_lines'] = []
for line2 in obj_move_line.account_ids:
                    amt = amount_calc * (line2.rate / 100)
                    qtty = qty * (line2.rate / 100)
al_vals = {
'name': il['name'],
'date': inv['date_invoice'],
'unit_amount': qtty,
'product_id': il['product_id'],
'account_id': line2.analytic_account_id.id,
'amount': amt,
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': self._get_journal_analytic(cr, uid, inv.type),
'ref': ref,
}
il['analytic_lines'].append((0, 0, al_vals))
return iml
account_invoice()
class account_analytic_plan(osv.osv):
_inherit = "account.analytic.plan"
_columns = {
'default_instance_id': fields.many2one('account.analytic.plan.instance', 'Default Entries'),
}
account_analytic_plan()
class analytic_default(osv.osv):
_inherit = "account.analytic.default"
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
analytic_default()
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
# Method overridden to set the analytic account by default on criterion match
def invoice_line_create(self, cr, uid, ids, context=None):
create_ids = super(sale_order_line,self).invoice_line_create(cr, uid, ids, context=context)
inv_line_obj = self.pool.get('account.invoice.line')
acct_anal_def_obj = self.pool.get('account.analytic.default')
if ids:
sale_line = self.browse(cr, uid, ids[0], context=context)
for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
rec = acct_anal_def_obj.account_get(cr, uid, line.product_id.id, sale_line.order_id.partner_id.id, uid, time.strftime('%Y-%m-%d'), context)
if rec:
inv_line_obj.write(cr, uid, [line.id], {'analytics_id': rec.analytics_id.id}, context=context)
return create_ids
sale_order_line()
class account_bank_statement(osv.osv):
_inherit = "account.bank.statement"
_name = "account.bank.statement"
def create_move_from_st_line(self, cr, uid, st_line_id, company_currency_id, st_line_number, context=None):
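        # Let the standard method create the move, then propagate the
        # statement line's analytic distribution to the generated move lines.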
account_move_line_pool = self.pool.get('account.move.line')
account_bank_statement_line_pool = self.pool.get('account.bank.statement.line')
st_line = account_bank_statement_line_pool.browse(cr, uid, st_line_id, context=context)
result = super(account_bank_statement,self).create_move_from_st_line(cr, uid, st_line_id, company_currency_id, st_line_number, context=context)
move = st_line.move_ids and st_line.move_ids[0] or False
if move:
for line in move.line_id:
account_move_line_pool.write(cr, uid, [line.id], {'analytics_id':st_line.analytics_id.id}, context=context)
return result
def button_confirm_bank(self, cr, uid, ids, context=None):
super(account_bank_statement,self).button_confirm_bank(cr, uid, ids, context=context)
for st in self.browse(cr, uid, ids, context=context):
for st_line in st.line_ids:
if st_line.analytics_id:
if not st.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal !'),_("You have to define an analytic journal on the '%s' journal!") % (st.journal_id.name,))
if not st_line.amount:
continue
return True
account_bank_statement()
class account_bank_statement_line(osv.osv):
_inherit = "account.bank.statement.line"
_name = "account.bank.statement.line"
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
account_bank_statement_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,756,863,172,319,786,000 | 47.259481 | 228 | 0.587766 | false |
jonathanverner/brython | www/src/Lib/test/test_imp.py | 23 | 19284 | import imp
import importlib
import os
import os.path
import shutil
import sys
from test import support
from test.test_importlib import util
import unittest
import warnings
class LockTests(unittest.TestCase):
"""Very basic test of import lock functions."""
def verify_lock_state(self, expected):
self.assertEqual(imp.lock_held(), expected,
"expected imp.lock_held() to be %r" % expected)
def testLock(self):
LOOPS = 50
# The import lock may already be held, e.g. if the test suite is run
# via "import test.autotest".
lock_held_at_start = imp.lock_held()
self.verify_lock_state(lock_held_at_start)
for i in range(LOOPS):
imp.acquire_lock()
self.verify_lock_state(True)
for i in range(LOOPS):
imp.release_lock()
# The original state should be restored now.
self.verify_lock_state(lock_held_at_start)
if not lock_held_at_start:
try:
imp.release_lock()
except RuntimeError:
pass
else:
self.fail("release_lock() without lock should raise "
"RuntimeError")
class ImportTests(unittest.TestCase):
def setUp(self):
mod = importlib.import_module('test.encoded_modules')
self.test_strings = mod.test_strings
self.test_path = mod.__path__
def test_import_encoded_module(self):
for modname, encoding, teststr in self.test_strings:
mod = importlib.import_module('test.encoded_modules.'
'module_' + modname)
self.assertEqual(teststr, mod.test)
def test_find_module_encoding(self):
for mod, encoding, _ in self.test_strings:
with imp.find_module('module_' + mod, self.test_path)[0] as fd:
self.assertEqual(fd.encoding, encoding)
path = [os.path.dirname(__file__)]
with self.assertRaises(SyntaxError):
imp.find_module('badsyntax_pep3120', path)
def test_issue1267(self):
for mod, encoding, _ in self.test_strings:
fp, filename, info = imp.find_module('module_' + mod,
self.test_path)
with fp:
self.assertNotEqual(fp, None)
self.assertEqual(fp.encoding, encoding)
self.assertEqual(fp.tell(), 0)
self.assertEqual(fp.readline(), '# test %s encoding\n'
% encoding)
fp, filename, info = imp.find_module("tokenize")
with fp:
self.assertNotEqual(fp, None)
self.assertEqual(fp.encoding, "utf-8")
self.assertEqual(fp.tell(), 0)
self.assertEqual(fp.readline(),
'"""Tokenization help for Python programs.\n')
def test_issue3594(self):
temp_mod_name = 'test_imp_helper'
sys.path.insert(0, '.')
try:
with open(temp_mod_name + '.py', 'w') as file:
file.write("# coding: cp1252\nu = 'test.test_imp'\n")
file, filename, info = imp.find_module(temp_mod_name)
file.close()
self.assertEqual(file.encoding, 'cp1252')
finally:
del sys.path[0]
support.unlink(temp_mod_name + '.py')
support.unlink(temp_mod_name + '.pyc')
support.unlink(temp_mod_name + '.pyo')
def test_issue5604(self):
# Test cannot cover imp.load_compiled function.
        # Martin von Loewis noted that a shared library cannot have a
        # non-ascii character, because the init_xxx function cannot be
        # compiled, so the issue never happens for dynamic modules.
        # The sources were nevertheless modified to follow the generic way
        # of processing paths.
# the return encoding could be uppercase or None
fs_encoding = sys.getfilesystemencoding()
# covers utf-8 and Windows ANSI code pages
# one non-space symbol from every page
# (http://en.wikipedia.org/wiki/Code_page)
known_locales = {
'utf-8' : b'\xc3\xa4',
'cp1250' : b'\x8C',
'cp1251' : b'\xc0',
'cp1252' : b'\xc0',
'cp1253' : b'\xc1',
'cp1254' : b'\xc0',
'cp1255' : b'\xe0',
'cp1256' : b'\xe0',
'cp1257' : b'\xc0',
'cp1258' : b'\xc0',
}
if sys.platform == 'darwin':
self.assertEqual(fs_encoding, 'utf-8')
# Mac OS X uses the Normal Form D decomposition
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
special_char = b'a\xcc\x88'
else:
special_char = known_locales.get(fs_encoding)
if not special_char:
self.skipTest("can't run this test with %s as filesystem encoding"
% fs_encoding)
decoded_char = special_char.decode(fs_encoding)
temp_mod_name = 'test_imp_helper_' + decoded_char
test_package_name = 'test_imp_helper_package_' + decoded_char
init_file_name = os.path.join(test_package_name, '__init__.py')
try:
# if the curdir is not in sys.path the test fails when run with
# ./python ./Lib/test/regrtest.py test_imp
sys.path.insert(0, os.curdir)
with open(temp_mod_name + '.py', 'w') as file:
file.write('a = 1\n')
file, filename, info = imp.find_module(temp_mod_name)
with file:
self.assertIsNotNone(file)
self.assertTrue(filename[:-3].endswith(temp_mod_name))
self.assertEqual(info[0], '.py')
self.assertEqual(info[1], 'U')
self.assertEqual(info[2], imp.PY_SOURCE)
mod = imp.load_module(temp_mod_name, file, filename, info)
self.assertEqual(mod.a, 1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
mod = imp.load_source(temp_mod_name, temp_mod_name + '.py')
self.assertEqual(mod.a, 1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if not sys.dont_write_bytecode:
mod = imp.load_compiled(
temp_mod_name,
imp.cache_from_source(temp_mod_name + '.py'))
self.assertEqual(mod.a, 1)
if not os.path.exists(test_package_name):
os.mkdir(test_package_name)
with open(init_file_name, 'w') as file:
file.write('b = 2\n')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
package = imp.load_package(test_package_name, test_package_name)
self.assertEqual(package.b, 2)
finally:
del sys.path[0]
for ext in ('.py', '.pyc', '.pyo'):
support.unlink(temp_mod_name + ext)
support.unlink(init_file_name + ext)
support.rmtree(test_package_name)
def test_issue9319(self):
path = os.path.dirname(__file__)
self.assertRaises(SyntaxError,
imp.find_module, "badsyntax_pep3120", [path])
def test_load_from_source(self):
# Verify that the imp module can correctly load and find .py files
# XXX (ncoghlan): It would be nice to use support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
orig_path = os.path
orig_getenv = os.getenv
with support.EnvironmentVarGuard():
x = imp.find_module("os")
self.addCleanup(x[0].close)
new_os = imp.load_module("os", *x)
self.assertIs(os, new_os)
self.assertIs(orig_path, new_os.path)
self.assertIsNot(orig_getenv, new_os.getenv)
@support.cpython_only
@unittest.skipIf(not hasattr(imp, 'load_dynamic'),
'imp.load_dynamic() required')
def test_issue15828_load_extensions(self):
# Issue 15828 picked up that the adapter between the old imp API
# and importlib couldn't handle C extensions
example = "_heapq"
x = imp.find_module(example)
file_ = x[0]
if file_ is not None:
self.addCleanup(file_.close)
mod = imp.load_module(example, *x)
self.assertEqual(mod.__name__, example)
def test_load_dynamic_ImportError_path(self):
# Issue #1559549 added `name` and `path` attributes to ImportError
# in order to provide better detail. Issue #10854 implemented those
# attributes on import failures of extensions on Windows.
path = 'bogus file path'
name = 'extension'
with self.assertRaises(ImportError) as err:
imp.load_dynamic(name, path)
self.assertIn(path, err.exception.path)
self.assertEqual(name, err.exception.name)
@support.cpython_only
@unittest.skipIf(not hasattr(imp, 'load_dynamic'),
'imp.load_dynamic() required')
def test_load_module_extension_file_is_None(self):
# When loading an extension module and the file is None, open one
# on the behalf of imp.load_dynamic().
# Issue #15902
name = '_heapq'
found = imp.find_module(name)
if found[0] is not None:
found[0].close()
if found[2][2] != imp.C_EXTENSION:
return
imp.load_module(name, None, *found[1:])
def test_multiple_calls_to_get_data(self):
# Issue #18755: make sure multiple calls to get_data() can succeed.
loader = imp._LoadSourceCompatibility('imp', imp.__file__,
open(imp.__file__))
loader.get_data(imp.__file__) # File should be closed
loader.get_data(imp.__file__) # Will need to create a newly opened file
class ReloadTests(unittest.TestCase):
"""Very basic tests to make sure that imp.reload() operates just like
reload()."""
def test_source(self):
# XXX (ncoghlan): It would be nice to use test.support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
with support.EnvironmentVarGuard():
import os
imp.reload(os)
def test_extension(self):
with support.CleanImport('time'):
import time
imp.reload(time)
def test_builtin(self):
with support.CleanImport('marshal'):
import marshal
imp.reload(marshal)
def test_with_deleted_parent(self):
# see #18681
from html import parser
html = sys.modules.pop('html')
def cleanup():
sys.modules['html'] = html
self.addCleanup(cleanup)
with self.assertRaisesRegex(ImportError, 'html'):
imp.reload(parser)
def test_module_replaced(self):
# see #18698
def code():
module = type(sys)('top_level')
module.spam = 3
sys.modules['top_level'] = module
mock = util.mock_modules('top_level',
module_code={'top_level': code})
with mock:
with util.import_state(meta_path=[mock]):
module = importlib.import_module('top_level')
reloaded = imp.reload(module)
actual = sys.modules['top_level']
self.assertEqual(actual.spam, 3)
self.assertEqual(reloaded.spam, 3)
class PEP3147Tests(unittest.TestCase):
"""Tests of PEP 3147."""
tag = imp.get_tag()
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag not be None')
def test_cache_from_source(self):
# Given the path to a .py file, return the path to its PEP 3147
# defined .pyc file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, True), expect)
def test_cache_from_source_no_cache_tag(self):
# Non cache tag means NotImplementedError.
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
imp.cache_from_source('whatever.py')
def test_cache_from_source_no_dot(self):
# Directory with a dot, filename without dot.
path = os.path.join('foo.bar', 'file')
expect = os.path.join('foo.bar', '__pycache__',
'file{}.pyc'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, True), expect)
def test_cache_from_source_optimized(self):
# Given the path to a .py file, return the path to its PEP 3147
# defined .pyo file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyo'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, False), expect)
def test_cache_from_source_cwd(self):
path = 'foo.py'
expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, True), expect)
def test_cache_from_source_override(self):
# When debug_override is not None, it can be any true-ish or false-ish
# value.
path = os.path.join('foo', 'bar', 'baz.py')
partial_expect = os.path.join('foo', 'bar', '__pycache__',
'baz.{}.py'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, []), partial_expect + 'o')
self.assertEqual(imp.cache_from_source(path, [17]),
partial_expect + 'c')
# However if the bool-ishness can't be determined, the exception
# propagates.
class Bearish:
def __bool__(self): raise RuntimeError
with self.assertRaises(RuntimeError):
imp.cache_from_source('/foo/bar/baz.py', Bearish())
@unittest.skipUnless(os.sep == '\\' and os.altsep == '/',
'test meaningful only where os.altsep is defined')
def test_sep_altsep_and_sep_cache_from_source(self):
# Windows path and PEP 3147 where sep is right of altsep.
self.assertEqual(
imp.cache_from_source('\\foo\\bar\\baz/qux.py', True),
'\\foo\\bar\\baz\\__pycache__\\qux.{}.pyc'.format(self.tag))
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag to not be '
'None')
def test_source_from_cache(self):
# Given the path to a PEP 3147 defined .pyc file, return the path to
# its source. This tests the good path.
path = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
expect = os.path.join('foo', 'bar', 'baz', 'qux.py')
self.assertEqual(imp.source_from_cache(path), expect)
def test_source_from_cache_no_cache_tag(self):
# If sys.implementation.cache_tag is None, raise NotImplementedError.
path = os.path.join('blah', '__pycache__', 'whatever.pyc')
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
imp.source_from_cache(path)
def test_source_from_cache_bad_path(self):
# When the path to a pyc file is not in PEP 3147 format, a ValueError
# is raised.
self.assertRaises(
ValueError, imp.source_from_cache, '/foo/bar/bazqux.pyc')
def test_source_from_cache_no_slash(self):
# No slashes at all in path -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache, 'foo.cpython-32.pyc')
def test_source_from_cache_too_few_dots(self):
# Too few dots in final path component -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache, '__pycache__/foo.pyc')
def test_source_from_cache_too_many_dots(self):
# Too many dots in final path component -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache,
'__pycache__/foo.cpython-32.foo.pyc')
def test_source_from_cache_no__pycache__(self):
# Another problem with the path -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache,
'/foo/bar/foo.cpython-32.foo.pyc')
def test_package___file__(self):
try:
m = __import__('pep3147')
except ImportError:
pass
else:
self.fail("pep3147 module already exists: %r" % (m,))
# Test that a package's __file__ points to the right source directory.
os.mkdir('pep3147')
sys.path.insert(0, os.curdir)
def cleanup():
if sys.path[0] == os.curdir:
del sys.path[0]
shutil.rmtree('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py file.
support.create_empty_file('pep3147/__init__.py')
importlib.invalidate_caches()
expected___file__ = os.sep.join(('.', 'pep3147', '__init__.py'))
m = __import__('pep3147')
self.assertEqual(m.__file__, expected___file__, (m.__file__, m.__path__, sys.path, sys.path_importer_cache))
# Ensure we load the pyc file.
support.unload('pep3147')
m = __import__('pep3147')
support.unload('pep3147')
self.assertEqual(m.__file__, expected___file__, (m.__file__, m.__path__, sys.path, sys.path_importer_cache))
class NullImporterTests(unittest.TestCase):
@unittest.skipIf(support.TESTFN_UNENCODABLE is None,
"Need an undecodeable filename")
def test_unencodeable(self):
name = support.TESTFN_UNENCODABLE
os.mkdir(name)
try:
self.assertRaises(ImportError, imp.NullImporter, name)
finally:
os.rmdir(name)
def test_main():
tests = [
ImportTests,
PEP3147Tests,
ReloadTests,
NullImporterTests,
]
try:
import _thread
except ImportError:
pass
else:
tests.append(LockTests)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
| bsd-3-clause | 6,684,130,573,024,004,000 | 39.175 | 116 | 0.56928 | false |
Distrotech/qtcreator | tests/system/tools/findUnusedObjects.py | 6 | 5941 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
import os
import sys
import tokenize
from optparse import OptionParser
from toolfunctions import checkDirectory
from toolfunctions import getFileContent
objMap = None
lastToken = [None, None]
stopTokens = ('OP', 'NAME', 'NUMBER', 'ENDMARKER')
def parseCommandLine():
global directory, onlyRemovable
parser = OptionParser("\n%prog [OPTIONS] [DIRECTORY]")
parser.add_option("-o", "--only-removable", dest="onlyRemovable",
action="store_true", default=False,
help="list removable objects only")
(options, args) = parser.parse_args()
if len(args) == 0:
directory = os.path.abspath(".")
elif len(args) == 1:
directory = os.path.abspath(args[0])
else:
print "\nERROR: Too many arguments\n"
parser.print_help()
sys.exit(1)
onlyRemovable = options.onlyRemovable
def collectObjects():
global objMap
data = getFileContent(objMap)
return map(lambda x: x.strip().split("\t", 1)[0], data.strip().splitlines())
def handleStringsWithTrailingBackSlash(origStr):
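    # Join string literals that were split over several lines with a trailing
    # backslash, dropping the backslash and the following indentation.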
try:
while True:
index = origStr.index("\\\n")
origStr = origStr[:index] + origStr[index+2:].lstrip()
except:
return origStr
def handle_token(tokenType, token, (startRow, startCol), (endRow, endCol), line):
global useCounts, lastToken, stopTokens
if tokenize.tok_name[tokenType] == 'STRING':
# concatenate strings followed directly by other strings
if lastToken[0] == 'STRING':
token = "'" + lastToken[1][1:-1] + str(token)[1:-1] + "'"
# store the new string as lastToken after removing potential trailing backslashes
# (including their following indentation)
lastToken = ['STRING' , handleStringsWithTrailingBackSlash(str(token))]
# if a stop token occurs check the potential string before it
elif tokenize.tok_name[tokenType] in stopTokens:
if lastToken[0] == 'STRING':
for obj in useCounts:
useCounts[obj] += lastToken[1].count("'%s'" % obj)
useCounts[obj] += lastToken[1].count('"%s"' % obj)
# store the stop token as lastToken
lastToken = [tokenize.tok_name[tokenType], str(token)]
def handleDataFiles(openFile, separator):
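    # Count object-map names referenced in data-driven test files (CSV/TSV);
    # the first line is treated as a header and skipped.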
global useCounts
# ignore header line
openFile.readline()
for line in openFile:
currentTokens = line.split(separator)
for token in currentTokens:
stripped = token.strip().strip('"')
if stripped in useCounts:
useCounts[stripped] = useCounts[stripped] + 1
def findUsages():
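    # Walk the test directory, tokenizing every .py file and scanning every
    # .csv/.tsv file (plus the object map itself) to count object usages.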
global directory, objMap
suffixes = (".py", ".csv", ".tsv")
for root, dirnames, filenames in os.walk(directory):
for filename in filter(lambda x: x.endswith(suffixes), filenames):
currentFile = open(os.path.join(root, filename))
if filename.endswith(".py"):
tokenize.tokenize(currentFile.readline, handle_token)
elif filename.endswith(".csv"):
handleDataFiles(currentFile, ",")
elif filename.endswith(".tsv"):
handleDataFiles(currentFile, "\t")
currentFile.close()
currentFile = open(objMap)
tokenize.tokenize(currentFile.readline, handle_token)
currentFile.close()
def printResult():
global useCounts, onlyRemovable
print
if onlyRemovable:
if min(useCounts.values()) > 0:
print "All objects are used once at least.\n"
return False
print "Unused objects:\n"
for obj in filter(lambda x: useCounts[x] == 0, useCounts):
print "%s" % obj
return True
else:
length = max(map(len, useCounts.keys()))
outFormat = "%%%ds %%3d" % length
for obj,useCount in useCounts.iteritems():
print outFormat % (obj, useCount)
print
return None
def main():
global useCounts, objMap
objMap = checkDirectory(directory)
useCounts = dict.fromkeys(collectObjects(), 0)
findUsages()
atLeastOneRemovable = printResult()
if atLeastOneRemovable:
print "\nAfter removing the listed objects you should re-run this tool"
print "to find objects that might have been used only by these objects.\n"
return 0
if __name__ == '__main__':
parseCommandLine()
sys.exit(main())
| lgpl-2.1 | 3,091,050,271,457,245,700 | 38.085526 | 89 | 0.640296 | false |
pescobar/easybuild-framework | easybuild/toolchains/mpi/__init__.py | 2 | 1244 | ##
# Copyright 2012-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declares toolchains.mpi namespace.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
| gpl-2.0 | 5,614,074,828,948,896,000 | 39.129032 | 96 | 0.73955 | false |
mengqiy/kubernetes | cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | 24 | 55854 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import random
import shutil
import subprocess
import time
import yaml
from charms.leadership import leader_get, leader_set
from pathlib import Path
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not, when_none
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
snap_resources = ['kubectl', 'kubelet', 'kube-proxy']
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# migrate to new flags
if is_state('kubernetes-worker.restarted-for-cloud'):
remove_state('kubernetes-worker.restarted-for-cloud')
set_state('kubernetes-worker.cloud.ready')
if is_state('kubernetes-worker.cloud-request-sent'):
# minor change, just for consistency
remove_state('kubernetes-worker.cloud-request-sent')
set_state('kubernetes-worker.cloud.request-sent')
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
migrate_resource_checksums()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
if is_state('kubernetes-worker.gpu.enabled'):
remove_state('kubernetes-worker.gpu.enabled')
try:
disable_gpu()
except ApplyNodeLabelFailed:
# Removing node label failed. Probably the master is unavailable.
# Proceed with the upgrade in hope GPUs will still be there.
hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
remove_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
def get_resource_checksum_db_key(resource):
''' Convert a resource name to a resource checksum database key. '''
return 'kubernetes-worker.resource-checksums.' + resource
def calculate_resource_checksum(resource):
''' Calculate a checksum for a resource '''
md5 = hashlib.md5()
path = hookenv.resource_get(resource)
if path:
with open(path, 'rb') as f:
data = f.read()
md5.update(data)
return md5.hexdigest()
def migrate_resource_checksums():
''' Migrate resource checksums from the old schema to the new one '''
for resource in snap_resources:
new_key = get_resource_checksum_db_key(resource)
if not db.get(new_key):
path = hookenv.resource_get(resource)
if path:
# old key from charms.reactive.helpers.any_file_changed
old_key = 'reactive.files_changed.' + path
old_checksum = db.get(old_key)
db.set(new_key, old_checksum)
else:
# No resource is attached. Previously, this meant no checksum
# would be calculated and stored. But now we calculate it as if
# it is a 0-byte resource, so let's go ahead and do that.
zero_checksum = hashlib.md5().hexdigest()
db.set(new_key, zero_checksum)
def check_resources_for_upgrade_needed():
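    ''' Check whether any attached snap resource changed since the stored
    checksums were taken, and flag an upgrade if so. '''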
hookenv.status_set('maintenance', 'Checking resources')
for resource in snap_resources:
key = get_resource_checksum_db_key(resource)
old_checksum = db.get(key)
new_checksum = calculate_resource_checksum(resource)
if new_checksum != old_checksum:
set_upgrade_needed()
def calculate_and_store_resource_checksums():
for resource in snap_resources:
key = get_resource_checksum_db_key(resource)
checksum = calculate_resource_checksum(resource)
db.set(key, checksum)
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
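    ''' Stop the pre-snap kubelet and kube-proxy services and clean up the
    legacy systemd units, defaults and binaries they left behind. '''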
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
calculate_and_store_resource_checksums()
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', get_node_name())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication, we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.is_leader')
def process_snapd_timer():
''' Set the snapd refresh timer on the leader so all cluster members
(present and future) will refresh near the same time. '''
# Get the current snapd refresh timer; we know layer-snap has set this
# when the 'snap.refresh.set' flag is present.
timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8')
# The first time through, data_changed will be true. Subsequent calls
# should only update leader data if something changed.
if data_changed('worker_snapd_refresh', timer):
hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
leader_set({'snapd_refresh': timer})
@when('kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.changed.snapd_refresh')
@when_not('leadership.is_leader')
def set_snapd_timer():
''' Set the snapd refresh.timer on non-leader cluster members. '''
# NB: This method should only be run when 'snap.refresh.set' is present.
# Layer-snap will always set a core refresh.timer, which may not be the
# same as our leader. Gating with 'snap.refresh.set' ensures layer-snap
# has finished and we are free to set our config to the leader's timer.
timer = leader_get('snapd_refresh')
hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
snap.set_refresh_timer(timer)
@hookenv.atexit
def charm_status():
'''Update the status message with the current status of kubelet.'''
vsphere_joined = is_state('endpoint.vsphere.joined')
azure_joined = is_state('endpoint.azure.joined')
cloud_blocked = is_state('kubernetes-worker.cloud.blocked')
if vsphere_joined and cloud_blocked:
hookenv.status_set('blocked',
'vSphere integration requires K8s 1.12 or greater')
return
if azure_joined and cloud_blocked:
hookenv.status_set('blocked',
'Azure integration requires K8s 1.11 or greater')
return
if is_state('kubernetes-worker.cloud.pending'):
hookenv.status_set('waiting', 'Waiting for cloud integration')
return
if not is_state('kube-control.dns.available'):
        # During deployment the worker has to start kubelet without cluster
        # dns configured. If this is the first unit online in a service pool
        # waiting to self-host the dns pod, it has to configure itself to
        # query the dns service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
return
if is_state('kubernetes-worker.snaps.upgrade-specified'):
hookenv.status_set('waiting', 'Upgrade pending')
return
if is_state('kubernetes-worker.snaps.upgrade-needed'):
hookenv.status_set('blocked',
'Needs manual upgrade, run the upgrade action')
return
if is_state('kubernetes-worker.snaps.installed'):
update_kubelet_status()
return
else:
pass # will have been set by snap layer or other handler
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
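    ''' Return the ingress address for the given relation, falling back to
    the unit's private address on Juju versions without network spaces. '''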
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
ingress_ip = get_ingress_address(kube_control)
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
@when_not('kubernetes-worker.cloud.pending',
'kubernetes-worker.cloud.blocked')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
ingress_ip = get_ingress_address(kube_control)
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
create_config(random.choice(servers), creds)
configure_kubelet(dns, ingress_ip)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
set_state('kubernetes-worker.label-config-required')
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress
load balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the
node. '''
# Get the user's configured labels.
config = hookenv.config()
user_labels = {}
for item in config.get('labels').split(' '):
if '=' in item:
key, val = item.split('=')
user_labels[key] = val
else:
hookenv.log('Skipping malformed option: {}.'.format(item))
# Collect the current label state.
current_labels = db.get('current_labels') or {}
# Remove any labels that the user has removed from the config.
for key in list(current_labels.keys()):
if key not in user_labels:
try:
remove_label(key)
del current_labels[key]
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Add any new labels.
for key, val in user_labels.items():
try:
set_label(key, val)
current_labels[key] = val
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Set the juju-application label.
try:
set_label('juju-application', hookenv.service_name())
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Label configuration complete.
remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args',
'config.changed.kubelet-extra-config')
def config_changed_requires_restart():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
"""Set a flag to handle new docker login options.
If docker daemon options have also changed, set a flag to ensure the
daemon is restarted prior to running docker login.
"""
config = hookenv.config()
if data_changed('docker-opts', config['docker-opts']):
hookenv.log('Found new docker daemon options. Requesting a restart.')
# State will be removed by layer-docker after restart
set_state('docker.restart')
set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
"""Login to a docker registry with configured credentials."""
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
remove_state('kubernetes-worker.docker-login')
set_state('kubernetes-worker.restart-needed')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
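    ''' Parse a space-separated list of key=value pairs from a charm config
    option into a dict; bare keys are mapped to the string 'true'. '''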
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
def configure_kubernetes_service(service, base_args, extra_args_key):
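    ''' Configure a snap-packaged service via `snap set`, applying the base
    args plus any user-supplied extra args and nulling out arguments that
    were set on a previous invocation. '''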
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
def merge_kubelet_extra_config(config, extra_config):
''' Updates config to include the contents of extra_config. This is done
recursively to allow deeply nested dictionaries to be merged.
This is destructive: it modifies the config dict that is passed in.
'''
for k, extra_config_value in extra_config.items():
if isinstance(extra_config_value, dict):
config_value = config.setdefault(k, {})
merge_kubelet_extra_config(config_value, extra_config_value)
else:
config[k] = extra_config_value
def configure_kubelet(dns, ingress_ip):
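    ''' Configure the kubelet snap: TLS paths, cluster DNS, cloud-provider
    flags and, on Kubernetes 1.10+, a KubeletConfiguration file; older
    releases fall back to the equivalent command-line flags. '''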
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['node-ip'] = ingress_ip
kubelet_opts['allow-privileged'] = set_privileged()
if is_state('endpoint.aws.ready'):
kubelet_opts['cloud-provider'] = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'gce'
kubelet_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.openstack.ready'):
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'openstack'
kubelet_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.vsphere.joined'):
# vsphere just needs to be joined on the worker (vs 'ready')
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'vsphere'
# NB: vsphere maps node product-id to its uuid (no config file needed).
uuid_file = '/sys/class/dmi/id/product_uuid'
with open(uuid_file, 'r') as f:
uuid = f.read().strip()
kubelet_opts['provider-id'] = 'vsphere://{}'.format(uuid)
elif is_state('endpoint.azure.ready'):
azure = endpoint_from_flag('endpoint.azure.ready')
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'azure'
kubelet_opts['cloud-config'] = str(cloud_config_path)
kubelet_opts['provider-id'] = azure.vm_id
if get_version('kubelet') >= (1, 10):
# Put together the KubeletConfiguration data
kubelet_config = {
'apiVersion': 'kubelet.config.k8s.io/v1beta1',
'kind': 'KubeletConfiguration',
'address': '0.0.0.0',
'authentication': {
'anonymous': {
'enabled': False
},
'x509': {
'clientCAFile': ca_cert_path
}
},
'clusterDomain': dns['domain'],
'failSwapOn': False,
'port': 10250,
'tlsCertFile': server_cert_path,
'tlsPrivateKeyFile': server_key_path
}
if dns['enable-kube-dns']:
kubelet_config['clusterDNS'] = [dns['sdn-ip']]
if is_state('kubernetes-worker.gpu.enabled'):
kubelet_config['featureGates'] = {
'DevicePlugins': True
}
# Add kubelet-extra-config. This needs to happen last so that it
# overrides any config provided by the charm.
kubelet_extra_config = hookenv.config('kubelet-extra-config')
kubelet_extra_config = yaml.load(kubelet_extra_config)
merge_kubelet_extra_config(kubelet_config, kubelet_extra_config)
# Render the file and configure Kubelet to use it
os.makedirs('/root/cdk/kubelet', exist_ok=True)
with open('/root/cdk/kubelet/config.yaml', 'w') as f:
f.write('# Generated by kubernetes-worker charm, do not edit\n')
yaml.dump(kubelet_config, f)
kubelet_opts['config'] = '/root/cdk/kubelet/config.yaml'
else:
# NOTE: This is for 1.9. Once we've dropped 1.9 support, we can remove
# this whole block and the parent if statement.
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['port'] = '10250'
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
if dns['enable-kube-dns']:
kubelet_opts['cluster-dns'] = dns['sdn-ip']
if is_state('kubernetes-worker.gpu.enabled'):
kubelet_opts['feature-gates'] = 'DevicePlugins=true'
if get_version('kubelet') >= (1, 11):
kubelet_opts['dynamic-config-dir'] = '/root/cdk/kubelet/dynamic-config'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
kube_proxy_opts['hostname-override'] = get_node_name()
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
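# Hedged usage sketch for create_kubeconfig(); the paths, endpoint and token
# below are hypothetical, and token auth is only one of the accepted mechanisms.
def _example_create_kubeconfig():
    create_kubeconfig('/root/cdk/kubeconfig',
                      'https://192.168.100.10:6443',
                      '/root/cdk/ca.crt',
                      user='kubelet',
                      token='example-token')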
@when_any('config.changed.default-backend-image',
'config.changed.ingress-ssl-chain-completion',
'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
config = hookenv.config()
# need to test this in case we get in
# here from a config change to the image
if not config.get('ingress'):
return
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = config.get('default-backend-image')
if (context['defaultbackend_image'] == "" or
context['defaultbackend_image'] == "auto"):
if context['arch'] == 's390x':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-s390x:1.5"
elif context['arch'] == 'arm64':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-arm64:1.5"
else:
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-amd64:1.5"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ssl_chain_completion'] = config.get(
'ingress-ssl-chain-completion')
context['ingress_image'] = config.get('nginx-image')
if context['ingress_image'] == "" or context['ingress_image'] == "auto":
images = {'amd64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.16.1', # noqa
'arm64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.16.1', # noqa
's390x': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.16.1', # noqa
'ppc64el': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.16.1', # noqa
}
context['ingress_image'] = images.get(context['arch'], images['amd64'])
if get_version('kubelet') < (1, 9):
context['daemonset_api_version'] = 'extensions/v1beta1'
else:
context['daemonset_api_version'] = 'apps/v1'
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to assume
            # the create already succeeded.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
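# Illustrative use of the wrapper above with a hypothetical manifest path:
# 'create' is skipped when the resource already exists, and 'delete' passes
# --now so the resources are removed immediately.
def _example_kubectl_manifest_usage():
    created = kubectl_manifest('create', '/root/cdk/addons/example.yaml')
    deleted = kubectl_manifest('delete', '/root/cdk/addons/example.yaml')
    return created, deleted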
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Return 'true' if privileged containers are needed.
    This is the case when a) the user explicitly requested them, or
    b) the user does not care (auto) and GPUs are available on a
    pre-1.9 kubelet.
    """
privileged = hookenv.config('allow-privileged').lower()
gpu_needs_privileged = (is_state('kubernetes-worker.gpu.enabled') and
get_version('kubelet') < (1, 9))
if privileged == 'auto':
privileged = 'true' if gpu_needs_privileged else 'false'
if privileged == 'false' and gpu_needs_privileged:
disable_gpu()
remove_state('kubernetes-worker.gpu.enabled')
# No need to restart kubernetes (set the restart-needed state)
# because set-privileged is already in the restart path
return privileged
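# Sketch of the decision above with explicit inputs instead of charm state
# (illustrative only; it leaves out the disable_gpu() side effect).
def _example_privileged_decision(requested, gpu_enabled, kubelet_version):
    gpu_needs_privileged = gpu_enabled and kubelet_version < (1, 9)
    if requested == 'auto':
        return 'true' if gpu_needs_privileged else 'false'
    return requested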
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('nvidia-docker.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
if get_version('kubelet') < (1, 9):
hookenv.status_set(
'active',
'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`).
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
set_label('gpu', 'true')
set_label('cuda', 'true')
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('nvidia-docker.installed')
@when_not('kubernetes-worker.restart-needed')
def nvidia_departed():
"""Cuda departed, probably due to the docker layer switching to a
non nvidia-docker."""
disable_gpu()
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
remove_label('gpu')
remove_label('cuda')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the hostname
    # even if it will only be used when we enable RBAC.
nodeuser = 'system:node:{}'.format(get_node_name().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(get_node_name().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
        # master changes (master leader dies and is replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
if data_changed('kube-control.creds', creds):
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
    missing.
"""
try:
goal_state = hookenv.goal_state()
except NotImplementedError:
goal_state = {}
if 'kube-control' in goal_state.get('relations', {}):
hookenv.status_set(
'waiting',
'Waiting for kubernetes-master to become ready')
else:
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
kubelet_extra_args = parse_extra_args('kubelet-extra-args')
cloud_provider = kubelet_extra_args.get('cloud-provider', '')
if is_state('endpoint.aws.ready'):
cloud_provider = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_provider = 'gce'
elif is_state('endpoint.openstack.ready'):
cloud_provider = 'openstack'
elif is_state('endpoint.vsphere.ready'):
cloud_provider = 'vsphere'
elif is_state('endpoint.azure.ready'):
cloud_provider = 'azure'
if cloud_provider == 'aws':
return getfqdn().lower()
else:
return gethostname().lower()
class ApplyNodeLabelFailed(Exception):
pass
def persistent_call(cmd, retry_message):
deadline = time.time() + 180
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
return True
hookenv.log(retry_message)
time.sleep(1)
else:
return False
def set_label(label, value):
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
cmd = cmd.format(kubeconfig_path, nodename, label, value)
cmd = cmd.split()
retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
def remove_label(label):
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'
cmd = cmd.format(kubeconfig_path, nodename, label)
cmd = cmd.split()
retry = 'Failed to remove label {0}. Will retry.'.format(label)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.openstack.joined',
'endpoint.vsphere.joined',
'endpoint.azure.joined')
@when_not('kubernetes-worker.cloud.ready')
def set_cloud_pending():
k8s_version = get_version('kubelet')
k8s_1_11 = k8s_version >= (1, 11)
k8s_1_12 = k8s_version >= (1, 12)
vsphere_joined = is_state('endpoint.vsphere.joined')
azure_joined = is_state('endpoint.azure.joined')
if (vsphere_joined and not k8s_1_12) or (azure_joined and not k8s_1_11):
set_state('kubernetes-worker.cloud.blocked')
else:
remove_state('kubernetes-worker.cloud.blocked')
set_state('kubernetes-worker.cloud.pending')
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.azure.joined')
@when('kube-control.cluster_tag.available')
@when_not('kubernetes-worker.cloud.request-sent')
def request_integration():
hookenv.status_set('maintenance', 'requesting cloud integration')
kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
cluster_tag = kube_control.get_cluster_tag()
if is_state('endpoint.aws.joined'):
cloud = endpoint_from_flag('endpoint.aws.joined')
cloud.tag_instance({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.tag_instance_security_group({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.tag_instance_subnet({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.enable_object_storage_management(['kubernetes-*'])
elif is_state('endpoint.gcp.joined'):
cloud = endpoint_from_flag('endpoint.gcp.joined')
cloud.label_instance({
'k8s-io-cluster-name': cluster_tag,
})
cloud.enable_object_storage_management()
elif is_state('endpoint.azure.joined'):
cloud = endpoint_from_flag('endpoint.azure.joined')
cloud.tag_instance({
'k8s-io-cluster-name': cluster_tag,
})
cloud.enable_object_storage_management()
cloud.enable_instance_inspection()
cloud.enable_dns_management()
set_state('kubernetes-worker.cloud.request-sent')
hookenv.status_set('waiting', 'Waiting for cloud integration')
@when_none('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.openstack.joined',
'endpoint.vsphere.joined',
'endpoint.azure.joined')
def clear_cloud_flags():
remove_state('kubernetes-worker.cloud.pending')
remove_state('kubernetes-worker.cloud.request-sent')
remove_state('kubernetes-worker.cloud.blocked')
remove_state('kubernetes-worker.cloud.ready')
@when_any('endpoint.aws.ready',
'endpoint.gcp.ready',
'endpoint.openstack.ready',
'endpoint.vsphere.ready',
'endpoint.azure.ready')
@when_not('kubernetes-worker.cloud.blocked',
'kubernetes-worker.cloud.ready')
def cloud_ready():
remove_state('kubernetes-worker.cloud.pending')
if is_state('endpoint.gcp.ready'):
_write_gcp_snap_config('kubelet')
elif is_state('endpoint.openstack.ready'):
_write_openstack_snap_config('kubelet')
elif is_state('endpoint.azure.ready'):
_write_azure_snap_config('kubelet')
set_state('kubernetes-worker.cloud.ready')
set_state('kubernetes-worker.restart-needed') # force restart
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
return _snap_common_path(component) / 'environment'
def _write_gcp_snap_config(component):
# gcp requires additional credentials setup
gcp = endpoint_from_flag('endpoint.gcp.ready')
creds_path = _gcp_creds_path(component)
with creds_path.open('w') as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(gcp.credentials)
# create a cloud-config file that sets token-url to nil to make the
# services use the creds env var instead of the metadata server, as
# well as making the cluster multizone
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('[Global]\n'
'token-url = nil\n'
'multizone = true\n')
daemon_env_path = _daemon_env_path(component)
if daemon_env_path.exists():
daemon_env = daemon_env_path.read_text()
if not daemon_env.endswith('\n'):
daemon_env += '\n'
else:
daemon_env = ''
if gcp_creds_env_key not in daemon_env:
daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
daemon_env_path.write_text(daemon_env)
def _write_openstack_snap_config(component):
# openstack requires additional credentials setup
openstack = endpoint_from_flag('endpoint.openstack.ready')
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('\n'.join([
'[Global]',
'auth-url = {}'.format(openstack.auth_url),
'username = {}'.format(openstack.username),
'password = {}'.format(openstack.password),
'tenant-name = {}'.format(openstack.project_name),
'domain-name = {}'.format(openstack.user_domain_name),
]))
def _write_azure_snap_config(component):
azure = endpoint_from_flag('endpoint.azure.ready')
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text(json.dumps({
'useInstanceMetadata': True,
'useManagedIdentityExtension': True,
'subscriptionId': azure.subscription_id,
'resourceGroup': azure.resource_group,
'location': azure.resource_group_location,
'vnetName': azure.vnet_name,
'vnetResourceGroup': azure.vnet_resource_group,
'subnetName': azure.subnet_name,
'securityGroupName': azure.security_group_name,
}))
def get_first_mount(mount_relation):
mount_relation_list = mount_relation.mounts()
if mount_relation_list and len(mount_relation_list) > 0:
# mount relation list is a list of the mount layer relations
# for now we just use the first one that is nfs
for mount in mount_relation_list:
# for now we just check the first mount and use that.
# the nfs charm only supports one for now.
if ('mounts' in mount and
mount['mounts'][0]['fstype'] == 'nfs'):
return mount['mounts'][0]
return None
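# Hedged sketch of the dict get_first_mount() is expected to return for an
# nfs relation; the keys mirror those consumed below, the values are invented.
def _example_nfs_mount():
    return {
        'fstype': 'nfs',
        'hostname': '10.0.0.5',
        'mountpoint': '/srv/kubernetes',
        'options': 'rw,soft',
    }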
@when('nfs.available')
def nfs_state_control(mount):
    ''' Determine whether to remove the state that controls re-rendering and
    execution on the nfs-relation-changed event: if the relation data has
    changed, the configs should be re-rendered. '''
mount_data = get_first_mount(mount)
if mount_data:
nfs_relation_data = {
'options': mount_data['options'],
'host': mount_data['hostname'],
'mountpoint': mount_data['mountpoint'],
'fstype': mount_data['fstype']
}
# Re-execute the rendering if the data has changed.
if data_changed('nfs-config', nfs_relation_data):
hookenv.log('reconfiguring nfs')
remove_state('nfs.configured')
@when('nfs.available')
@when_not('nfs.configured')
def nfs_storage(mount):
'''NFS on kubernetes requires nfs config rendered into a deployment of
the nfs client provisioner. That will handle the persistent volume claims
with no persistent volume to back them.'''
mount_data = get_first_mount(mount)
if not mount_data:
return
addon_path = '/root/cdk/addons/{}'
# Render the NFS deployment
manifest = addon_path.format('nfs-provisioner.yaml')
render('nfs-provisioner.yaml', manifest, mount_data)
hookenv.log('Creating the nfs provisioner.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create nfs provisioner. Will attempt again next update.') # noqa
return
set_state('nfs.configured')
| apache-2.0 | -7,516,812,032,296,352,000 | 36.236 | 117 | 0.652147 | false |
savoirfairelinux/odoo | addons/hr_expense/hr_expense.py | 22 | 25312 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
def _employee_get(obj, cr, uid, context=None):
if context is None:
context = {}
ids = obj.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
class hr_expense_expense(osv.osv):
def _amount(self, cr, uid, ids, field_name, arg, context=None):
res= {}
for expense in self.browse(cr, uid, ids, context=context):
total = 0.0
for line in expense.line_ids:
total += line.unit_amount * line.unit_quantity
res[expense.id] = total
return res
def _get_expense_from_line(self, cr, uid, ids, context=None):
return [line.expense_id.id for line in self.pool.get('hr.expense.line').browse(cr, uid, ids, context=context)]
def _get_currency(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0]
return user.company_id.currency_id.id
_name = "hr.expense.expense"
_inherit = ['mail.thread']
_description = "Expense"
_order = "id desc"
_track = {
'state': {
'hr_expense.mt_expense_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'accepted',
'hr_expense.mt_expense_refused': lambda self, cr, uid, obj, ctx=None: obj.state == 'cancelled',
'hr_expense.mt_expense_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirm',
},
}
_columns = {
'name': fields.char('Description', size=128, required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'id': fields.integer('Sheet ID', readonly=True),
'date': fields.date('Date', select=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'journal_id': fields.many2one('account.journal', 'Force Journal', help = "The journal used when the expense is done."),
'employee_id': fields.many2one('hr.employee', "Employee", required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'user_id': fields.many2one('res.users', 'User', required=True),
'date_confirm': fields.date('Confirmation Date', select=True, help="Date of the confirmation of the sheet expense. It's filled when the button Confirm is pressed."),
'date_valid': fields.date('Validation Date', select=True, help="Date of the acceptation of the sheet expense. It's filled when the button Accept is pressed."),
'user_valid': fields.many2one('res.users', 'Validation By', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'account_move_id': fields.many2one('account.move', 'Ledger Posting'),
'line_ids': fields.one2many('hr.expense.line', 'expense_id', 'Expense Lines', readonly=True, states={'draft':[('readonly',False)]} ),
'note': fields.text('Note'),
'amount': fields.function(_amount, string='Total Amount', digits_compute=dp.get_precision('Account'),
store={
'hr.expense.line': (_get_expense_from_line, ['unit_amount','unit_quantity'], 10)
}),
'currency_id': fields.many2one('res.currency', 'Currency', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'department_id':fields.many2one('hr.department','Department', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True),
'state': fields.selection([
('draft', 'New'),
('cancelled', 'Refused'),
('confirm', 'Waiting Approval'),
('accepted', 'Approved'),
('done', 'Waiting Payment'),
('paid', 'Paid'),
],
'Status', readonly=True, track_visibility='onchange',
            help='When the expense request is created the status is \'Draft\'.\n Once it is confirmed by the user and sent to the admin, the status is \'Waiting Confirmation\'.\
\nIf the admin accepts it, the status is \'Accepted\'.\n If the accounting entries are made for the expense request, the status is \'Waiting Payment\'.'),
}
_defaults = {
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'hr.employee', context=c),
'date': fields.date.context_today,
'state': 'draft',
'employee_id': _employee_get,
'user_id': lambda cr, uid, id, c={}: id,
'currency_id': _get_currency,
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default.update(
account_move_id=False,
date_confirm=False,
date_valid=False,
user_valid=False)
return super(hr_expense_expense, self).copy(cr, uid, id, default=default, context=context)
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state != 'draft':
raise osv.except_osv(_('Warning!'),_('You can only delete draft expenses!'))
return super(hr_expense_expense, self).unlink(cr, uid, ids, context)
def onchange_currency_id(self, cr, uid, ids, currency_id=False, company_id=False, context=None):
res = {'value': {'journal_id': False}}
journal_ids = self.pool.get('account.journal').search(cr, uid, [('type','=','purchase'), ('currency','=',currency_id), ('company_id', '=', company_id)], context=context)
if journal_ids:
res['value']['journal_id'] = journal_ids[0]
return res
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
emp_obj = self.pool.get('hr.employee')
department_id = False
company_id = False
if employee_id:
employee = emp_obj.browse(cr, uid, employee_id, context=context)
department_id = employee.department_id.id
company_id = employee.company_id.id
return {'value': {'department_id': department_id, 'company_id': company_id}}
def expense_confirm(self, cr, uid, ids, context=None):
for expense in self.browse(cr, uid, ids):
if expense.employee_id and expense.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [expense.id], user_ids=[expense.employee_id.parent_id.user_id.id])
return self.write(cr, uid, ids, {'state': 'confirm', 'date_confirm': time.strftime('%Y-%m-%d')}, context=context)
def expense_accept(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'accepted', 'date_valid': time.strftime('%Y-%m-%d'), 'user_valid': uid}, context=context)
def expense_canceled(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancelled'}, context=context)
def account_move_get(self, cr, uid, expense_id, context=None):
'''
This method prepare the creation of the account move related to the given expense.
:param expense_id: Id of expense for which we are creating account_move.
:return: mapping between fieldname and value of account move to create
:rtype: dict
'''
journal_obj = self.pool.get('account.journal')
expense = self.browse(cr, uid, expense_id, context=context)
company_id = expense.company_id.id
date = expense.date_confirm
ref = expense.name
journal_id = False
if expense.journal_id:
journal_id = expense.journal_id.id
else:
journal_id = journal_obj.search(cr, uid, [('type', '=', 'purchase'), ('company_id', '=', company_id)])
if not journal_id:
raise osv.except_osv(_('Error!'), _("No expense journal found. Please make sure you have a journal with type 'purchase' configured."))
journal_id = journal_id[0]
return self.pool.get('account.move').account_move_prepare(cr, uid, journal_id, date=date, ref=ref, company_id=company_id, context=context)
def line_get_convert(self, cr, uid, x, part, date, context=None):
partner_id = self.pool.get('res.partner')._find_accounting_partner(part).id
return {
'date_maturity': x.get('date_maturity', False),
'partner_id': partner_id,
'name': x['name'][:64],
'date': date,
'debit': x['price']>0 and x['price'],
'credit': x['price']<0 and -x['price'],
'account_id': x['account_id'],
'analytic_lines': x.get('analytic_lines', False),
'amount_currency': x['price']>0 and abs(x.get('amount_currency', False)) or -abs(x.get('amount_currency', False)),
'currency_id': x.get('currency_id', False),
'tax_code_id': x.get('tax_code_id', False),
'tax_amount': x.get('tax_amount', False),
'ref': x.get('ref', False),
'quantity': x.get('quantity',1.00),
'product_id': x.get('product_id', False),
'product_uom_id': x.get('uos_id', False),
'analytic_account_id': x.get('account_analytic_id', False),
}
def compute_expense_totals(self, cr, uid, exp, company_currency, ref, account_move_lines, context=None):
'''
            internal method used to compute the total amount of an expense in the company currency and
            in the expense currency, given the account_move_lines that will be created. It also does some small
            transformations on these account_move_lines (for multi-currency purposes)
:param account_move_lines: list of dict
:rtype: tuple of 3 elements (a, b ,c)
a: total in company currency
b: total in hr.expense currency
c: account_move_lines potentially modified
'''
cur_obj = self.pool.get('res.currency')
if context is None:
context={}
context.update({'date': exp.date_confirm or time.strftime('%Y-%m-%d')})
total = 0.0
total_currency = 0.0
for i in account_move_lines:
if exp.currency_id.id != company_currency:
i['currency_id'] = exp.currency_id.id
i['amount_currency'] = i['price']
i['price'] = cur_obj.compute(cr, uid, exp.currency_id.id,
company_currency, i['price'],
context=context)
else:
i['amount_currency'] = False
i['currency_id'] = False
total -= i['price']
total_currency -= i['amount_currency'] or i['price']
return total, total_currency, account_move_lines
def action_move_create(self, cr, uid, ids, context=None):
'''
main function that is called when trying to create the accounting entries related to an expense
'''
move_obj = self.pool.get('account.move')
for exp in self.browse(cr, uid, ids, context=context):
if not exp.employee_id.address_home_id:
raise osv.except_osv(_('Error!'), _('The employee must have a home address.'))
if not exp.employee_id.address_home_id.property_account_payable.id:
raise osv.except_osv(_('Error!'), _('The employee must have a payable account set on his home address.'))
company_currency = exp.company_id.currency_id.id
            diff_currency_p = exp.currency_id.id != company_currency
#create the move that will contain the accounting entries
move_id = move_obj.create(cr, uid, self.account_move_get(cr, uid, exp.id, context=context), context=context)
#one account.move.line per expense line (+taxes..)
eml = self.move_line_get(cr, uid, exp.id, context=context)
#create one more move line, a counterline for the total on payable account
total, total_currency, eml = self.compute_expense_totals(cr, uid, exp, company_currency, exp.name, eml, context=context)
acc = exp.employee_id.address_home_id.property_account_payable.id
eml.append({
'type': 'dest',
'name': '/',
'price': total,
'account_id': acc,
'date_maturity': exp.date_confirm,
'amount_currency': diff_currency_p and total_currency or False,
'currency_id': diff_currency_p and exp.currency_id.id or False,
'ref': exp.name
})
#convert eml into an osv-valid format
lines = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, exp.employee_id.address_home_id, exp.date_confirm, context=context)), eml)
journal_id = move_obj.browse(cr, uid, move_id, context).journal_id
# post the journal entry if 'Skip 'Draft' State for Manual Entries' is checked
if journal_id.entry_posted:
move_obj.button_validate(cr, uid, [move_id], context)
move_obj.write(cr, uid, [move_id], {'line_id': lines}, context=context)
self.write(cr, uid, ids, {'account_move_id': move_id, 'state': 'done'}, context=context)
return True
def move_line_get(self, cr, uid, expense_id, context=None):
res = []
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
if context is None:
context = {}
exp = self.browse(cr, uid, expense_id, context=context)
company_currency = exp.company_id.currency_id.id
for line in exp.line_ids:
mres = self.move_line_get_item(cr, uid, line, context)
if not mres:
continue
res.append(mres)
tax_code_found= False
#Calculate tax according to default tax on product
taxes = []
#Taken from product_id_onchange in account.invoice
if line.product_id:
fposition_id = False
fpos_obj = self.pool.get('account.fiscal.position')
fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False
product = line.product_id
taxes = product.supplier_taxes_id
#If taxes are not related to the product, maybe they are in the account
if not taxes:
                    a = product.property_account_expense.id  # Why is there no check here?
if not a:
a = product.categ_id.property_account_expense_categ.id
a = fpos_obj.map_account(cr, uid, fpos, a)
taxes = a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False
tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes)
if not taxes:
continue
#Calculating tax on the line and creating move?
for tax in tax_obj.compute_all(cr, uid, taxes,
line.unit_amount ,
line.unit_quantity, line.product_id,
exp.user_id.partner_id)['taxes']:
tax_code_id = tax['base_code_id']
tax_amount = line.total_amount * tax['base_sign']
if tax_code_found:
if not tax_code_id:
continue
res.append(self.move_line_get_item(cr, uid, line, context))
res[-1]['price'] = 0.0
res[-1]['account_analytic_id'] = False
elif not tax_code_id:
continue
tax_code_found = True
res[-1]['tax_code_id'] = tax_code_id
res[-1]['tax_amount'] = cur_obj.compute(cr, uid, exp.currency_id.id, company_currency, tax_amount, context={'date': exp.date_confirm})
##
is_price_include = tax_obj.read(cr,uid,tax['id'],['price_include'],context)['price_include']
if is_price_include:
                    ## We need to deduct the tax from the price
res[-1]['price'] = res[-1]['price'] - (tax['amount'] * tax['base_sign'] or 0.0)
assoc_tax = {
'type':'tax',
'name':tax['name'],
'price_unit': tax['price_unit'],
'quantity': 1,
'price': tax['amount'] * tax['base_sign'] or 0.0,
'account_id': tax['account_collected_id'] or mres['account_id'],
'tax_code_id': tax['tax_code_id'],
'tax_amount': tax['amount'] * tax['base_sign'],
}
res.append(assoc_tax)
return res
def move_line_get_item(self, cr, uid, line, context=None):
company = line.expense_id.company_id
property_obj = self.pool.get('ir.property')
if line.product_id:
acc = line.product_id.property_account_expense
if not acc:
acc = line.product_id.categ_id.property_account_expense_categ
if not acc:
raise osv.except_osv(_('Error!'), _('No purchase account found for the product %s (or for his category), please configure one.') % (line.product_id.name))
else:
acc = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context={'force_company': company.id})
if not acc:
raise osv.except_osv(_('Error!'), _('Please configure Default Expense account for Product purchase: `property_account_expense_categ`.'))
return {
'type':'src',
'name': line.name.split('\n')[0][:64],
'price_unit':line.unit_amount,
'quantity':line.unit_quantity,
'price':line.total_amount,
'account_id':acc.id,
'product_id':line.product_id.id,
'uos_id':line.uom_id.id,
'account_analytic_id':line.analytic_account.id,
}
def action_view_move(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing account.move of given expense ids.
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
expense = self.browse(cr, uid, ids[0], context=context)
assert expense.account_move_id
try:
dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'view_move_form')
except ValueError, e:
view_id = False
result = {
'name': _('Expense Account Move'),
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'res_model': 'account.move',
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': expense.account_move_id.id,
}
return result
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'hr_expense_ok': fields.boolean('Can be Expensed', help="Specify if the product can be selected in an HR expense line."),
}
class hr_expense_line(osv.osv):
_name = "hr.expense.line"
_description = "Expense Line"
def _amount(self, cr, uid, ids, field_name, arg, context=None):
if not ids:
return {}
cr.execute("SELECT l.id,COALESCE(SUM(l.unit_amount*l.unit_quantity),0) AS amount FROM hr_expense_line l WHERE id IN %s GROUP BY l.id ",(tuple(ids),))
res = dict(cr.fetchall())
return res
def _get_uom_id(self, cr, uid, context=None):
result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result and result[1] or False
_columns = {
'name': fields.char('Expense Note', size=128, required=True),
'date_value': fields.date('Date', required=True),
'expense_id': fields.many2one('hr.expense.expense', 'Expense', ondelete='cascade', select=True),
'total_amount': fields.function(_amount, string='Total', digits_compute=dp.get_precision('Account')),
'unit_amount': fields.float('Unit Price', digits_compute=dp.get_precision('Product Price')),
'unit_quantity': fields.float('Quantities', digits_compute= dp.get_precision('Product Unit of Measure')),
'product_id': fields.many2one('product.product', 'Product', domain=[('hr_expense_ok','=',True)]),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True),
'description': fields.text('Description'),
'analytic_account': fields.many2one('account.analytic.account','Analytic account'),
'ref': fields.char('Reference', size=32),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of expense lines."),
}
_defaults = {
'unit_quantity': 1,
'date_value': lambda *a: time.strftime('%Y-%m-%d'),
'uom_id': _get_uom_id,
}
_order = "sequence, date_value desc"
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
res = {}
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
res['name'] = product.name
amount_unit = product.price_get('standard_price')[product.id]
res['unit_amount'] = amount_unit
res['uom_id'] = product.uom_id.id
return {'value': res}
def onchange_uom(self, cr, uid, ids, product_id, uom_id, context=None):
res = {'value':{}}
if not uom_id or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, uom_id, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure')}
res['value'].update({'uom_id': product.uom_id.id})
return res
class account_move_line(osv.osv):
_inherit = "account.move.line"
def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
res = super(account_move_line, self).reconcile(cr, uid, ids, type=type, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id, context=context)
#when making a full reconciliation of account move lines 'ids', we may need to recompute the state of some hr.expense
account_move_ids = [aml.move_id.id for aml in self.browse(cr, uid, ids, context=context)]
expense_obj = self.pool.get('hr.expense.expense')
currency_obj = self.pool.get('res.currency')
if account_move_ids:
expense_ids = expense_obj.search(cr, uid, [('account_move_id', 'in', account_move_ids)], context=context)
for expense in expense_obj.browse(cr, uid, expense_ids, context=context):
if expense.state == 'done':
                    # assume it should be marked paid, then try to invalidate that assumption
new_status_is_paid = True
for aml in expense.account_move_id.line_id:
if aml.account_id.type == 'payable' and not currency_obj.is_zero(cr, uid, expense.company_id.currency_id, aml.amount_residual):
new_status_is_paid = False
if new_status_is_paid:
expense_obj.write(cr, uid, [expense.id], {'state': 'paid'}, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,828,210,009,029,769,000 | 51.514523 | 209 | 0.578935 | false |
geometalab/Vector-Tiles-Reader-QGIS-Plugin | ext-libs/google/protobuf/internal/text_encoding_test.py | 126 | 2903 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.text_encoding."""
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import text_encoding
TEST_VALUES = [
("foo\\rbar\\nbaz\\t",
"foo\\rbar\\nbaz\\t",
b"foo\rbar\nbaz\t"),
("\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
"\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
b"'full of \"sound\" and \"fury\"'"),
("signi\\\\fying\\\\ nothing\\\\",
"signi\\\\fying\\\\ nothing\\\\",
b"signi\\fying\\ nothing\\"),
("\\010\\t\\n\\013\\014\\r",
"\x08\\t\\n\x0b\x0c\\r",
b"\010\011\012\013\014\015")]
class TextEncodingTestCase(unittest.TestCase):
def testCEscape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEqual(escaped,
text_encoding.CEscape(unescaped, as_utf8=False))
self.assertEqual(escaped_utf8,
text_encoding.CEscape(unescaped, as_utf8=True))
def testCUnescape(self):
for escaped, escaped_utf8, unescaped in TEST_VALUES:
self.assertEqual(unescaped, text_encoding.CUnescape(escaped))
self.assertEqual(unescaped, text_encoding.CUnescape(escaped_utf8))
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -7,788,057,653,604,887,000 | 39.319444 | 72 | 0.703066 | false |
emilio/servo | tests/wpt/web-platform-tests/tools/third_party/html5lib/html5lib/tests/test_sanitizer.py | 29 | 5540 | from __future__ import absolute_import, division, unicode_literals
from html5lib import constants, parseFragment, serialize
from html5lib.filters import sanitizer
def runSanitizerTest(_, expected, input):
parsed = parseFragment(expected)
expected = serialize(parsed,
omit_optional_tags=False,
use_trailing_solidus=True,
space_before_trailing_solidus=False,
quote_attr_values="always",
quote_char='"',
alphabetical_attributes=True)
assert expected == sanitize_html(input)
def sanitize_html(stream):
parsed = parseFragment(stream)
serialized = serialize(parsed,
sanitize=True,
omit_optional_tags=False,
use_trailing_solidus=True,
space_before_trailing_solidus=False,
quote_attr_values="always",
quote_char='"',
alphabetical_attributes=True)
return serialized
def test_should_handle_astral_plane_characters():
sanitized = sanitize_html("<p>𝒵 𝔸</p>")
expected = '<p>\U0001d4b5 \U0001d538</p>'
assert expected == sanitized
def test_should_allow_relative_uris():
sanitized = sanitize_html('<p><a href="/example.com"></a></p>')
expected = '<p><a href="/example.com"></a></p>'
assert expected == sanitized
def test_invalid_data_uri():
sanitized = sanitize_html('<audio controls="" src="data:foobar"></audio>')
expected = '<audio controls></audio>'
assert expected == sanitized
def test_invalid_ipv6_url():
sanitized = sanitize_html('<a href="h://]">')
expected = "<a></a>"
assert expected == sanitized
def test_data_uri_disallowed_type():
sanitized = sanitize_html('<audio controls="" src="data:text/html,<html>"></audio>')
expected = "<audio controls></audio>"
assert expected == sanitized
def test_sanitizer():
for ns, tag_name in sanitizer.allowed_elements:
if ns != constants.namespaces["html"]:
continue
if tag_name in ['caption', 'col', 'colgroup', 'optgroup', 'option', 'table', 'tbody', 'td',
'tfoot', 'th', 'thead', 'tr', 'select']:
continue # TODO
if tag_name == 'image':
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<img title=\"1\"/>foo <bad>bar</bad> baz",
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
elif tag_name == 'br':
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<br title=\"1\"/>foo <bad>bar</bad> baz<br/>",
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
elif tag_name in constants.voidElements:
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<%s title=\"1\"/>foo <bad>bar</bad> baz" % tag_name,
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
else:
yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
"<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
"<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name))
for ns, attribute_name in sanitizer.allowed_attributes:
if ns is not None:
continue
if attribute_name != attribute_name.lower():
continue # TODO
if attribute_name == 'style':
continue
attribute_value = 'foo'
if attribute_name in sanitizer.attr_val_is_uri:
attribute_value = '%s://sub.domain.tld/path/object.ext' % sanitizer.allowed_protocols[0]
yield (runSanitizerTest, "test_should_allow_%s_attribute" % attribute_name,
"<p %s=\"%s\">foo <bad>bar</bad> baz</p>" % (attribute_name, attribute_value),
"<p %s='%s'>foo <bad>bar</bad> baz</p>" % (attribute_name, attribute_value))
for protocol in sanitizer.allowed_protocols:
rest_of_uri = '//sub.domain.tld/path/object.ext'
if protocol == 'data':
rest_of_uri = 'image/png;base64,aGVsbG8gd29ybGQ='
yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
"<img src=\"%s:%s\">foo</a>" % (protocol, rest_of_uri),
"""<img src="%s:%s">foo</a>""" % (protocol, rest_of_uri))
for protocol in sanitizer.allowed_protocols:
rest_of_uri = '//sub.domain.tld/path/object.ext'
if protocol == 'data':
rest_of_uri = 'image/png;base64,aGVsbG8gd29ybGQ='
protocol = protocol.upper()
yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
"<img src=\"%s:%s\">foo</a>" % (protocol, rest_of_uri),
"""<img src="%s:%s">foo</a>""" % (protocol, rest_of_uri))
def test_lowercase_color_codes_in_style():
sanitized = sanitize_html("<p style=\"border: 1px solid #a2a2a2;\"></p>")
expected = '<p style=\"border: 1px solid #a2a2a2;\"></p>'
assert expected == sanitized
def test_uppercase_color_codes_in_style():
sanitized = sanitize_html("<p style=\"border: 1px solid #A2A2A2;\"></p>")
expected = '<p style=\"border: 1px solid #A2A2A2;\"></p>'
assert expected == sanitized
| mpl-2.0 | -8,783,747,234,179,834,000 | 42.622047 | 105 | 0.561913 | false |
benoitsteiner/tensorflow-xsmm | tensorflow/python/profiler/pprof_profiler_test.py | 70 | 5174 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pprof_profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
from proto import profile_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.profiler import pprof_profiler
class PprofProfilerTest(test.TestCase):
def testDataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
graph.get_operations.return_value = []
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEquals(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEquals(0, len(profile_files))
def testRunMetadataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [('a/b/file1', 10, 'some_var')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEquals(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEquals(0, len(profile_files))
def testValidProfile(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
node1 = step_stats_pb2.NodeExecStats(
node_name='Add/123',
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = 'deviceA'
device1.node_stats.extend([node1])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [
('a/b/file1', 10, 'apply_op', 'abc'), ('a/c/file2', 12, 'my_op', 'def')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
expected_proto = """sample_type {
type: 5
unit: 5
}
sample_type {
type: 6
unit: 7
}
sample_type {
type: 8
unit: 7
}
sample {
value: 1
value: 4
value: 2
label {
key: 1
str: 2
}
label {
key: 3
str: 4
}
}
string_table: ""
string_table: "node_name"
string_table: "Add/123"
string_table: "op_type"
string_table: "add"
string_table: "count"
string_table: "all_time"
string_table: "nanoseconds"
string_table: "op_time"
string_table: "Device 1 of 1: deviceA"
comment: 9
"""
# Test with protos
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEquals(1, len(profiles))
self.assertTrue('deviceA' in profiles)
self.assertEquals(expected_proto, str(profiles['deviceA']))
# Test with files
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEquals(1, len(profile_files))
with gzip.open(profile_files[0]) as profile_file:
profile_contents = profile_file.read()
profile = profile_pb2.Profile()
profile.ParseFromString(profile_contents)
self.assertEquals(expected_proto, str(profile))
def testProfileWithWhileLoop(self):
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
num_iters = 5
with self.test_session() as sess:
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, num_iters)
b = lambda i: math_ops.add(i, 1)
r = control_flow_ops.while_loop(c, b, [i])
sess.run(r, options=options, run_metadata=run_metadata)
profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
self.assertEquals(1, len(profiles))
profile = next(iter(profiles.values()))
add_samples = [] # Samples for the while/Add node
for sample in profile.sample:
if profile.string_table[sample.label[0].str] == 'while/Add':
add_samples.append(sample)
# Values for same nodes are aggregated.
self.assertEquals(1, len(add_samples))
# Value of "count" should be equal to number of iterations.
self.assertEquals(num_iters, add_samples[0].value[0])
if __name__ == '__main__':
test.main()
| apache-2.0 | 9,124,647,373,301,902,000 | 30.54878 | 80 | 0.671821 | false |
odootr/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/Translation.py | 384 | 11372 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.functions import *
from lib.error import ErrorDialog
from LoginTest import *
from lib.rpc import *
database="test_001"
uid = 3
class AddLang(unohelper.Base, XJobExecutor ):
def __init__(self, sVariable="", sFields="", sDisplayName="", bFromModify=False):
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
global passwd
self.password = passwd
self.win = DBModalDialog(60, 50, 180, 225, "Set Lang Builder")
self.win.addFixedText("lblVariable", 27, 12, 60, 15, "Variable :")
self.win.addComboBox("cmbVariable", 180-120-2, 10, 120, 15,True,
itemListenerProc=self.cmbVariable_selected)
self.insVariable = self.win.getControl( "cmbVariable" )
self.win.addFixedText("lblFields", 10, 32, 60, 15, "Variable Fields :")
self.win.addComboListBox("lstFields", 180-120-2, 30, 120, 150, False,itemListenerProc=self.lstbox_selected)
self.insField = self.win.getControl( "lstFields" )
self.win.addFixedText("lblUName", 8, 187, 60, 15, "Displayed name :")
self.win.addEdit("txtUName", 180-120-2, 185, 120, 15,)
self.win.addButton('btnOK',-5 ,-5, 45, 15, 'Ok', actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-5 - 45 - 5 ,-5,45,15,'Cancel', actionListenerProc = self.btnCancel_clicked )
self.sValue=None
self.sObj=None
self.aSectionList=[]
self.sGDisplayName=sDisplayName
self.aItemList=[]
self.aComponentAdd=[]
self.aObjectList=[]
self.aListFields=[]
self.aVariableList=[]
EnumDocument(self.aItemList,self.aComponentAdd)
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.sMyHost= ""
global url
self.sock=RPCSession(url)
if not docinfo.getUserFieldValue(3) == "" and not docinfo.getUserFieldValue(0)=="":
self.sMyHost= docinfo.getUserFieldValue(0)
self.count=0
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
self.count += 1
getList(self.aObjectList, self.sMyHost,self.count)
cursor = doc.getCurrentController().getViewCursor()
text=cursor.getText()
tcur=text.createTextCursorByRange(cursor)
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == "Objects", self.aObjectList ) )
for i in range(len(self.aItemList)):
anItem = self.aItemList[i][1]
component = self.aComponentAdd[i]
if component == "Document":
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextSection:
getRecersiveSection(tcur.TextSection,self.aSectionList)
if component in self.aSectionList:
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextTable:
if not component == "Document" and component[component.rfind(".") + 1:] == tcur.TextTable.Name:
VariableScope(tcur,self.insVariable,self.aObjectList,self.aComponentAdd,self.aItemList,component)
self.bModify=bFromModify
if self.bModify==True:
sItem=""
for anObject in self.aObjectList:
if anObject[:anObject.find("(")] == sVariable:
sItem = anObject
self.insVariable.setText( sItem )
genTree(sItem[sItem.find("(")+1:sItem.find(")")],self.aListFields, self.insField,self.sMyHost,2,ending_excl=['one2many','many2one','many2many','reference'], recur=['many2one'])
self.sValue= self.win.getListBoxItem("lstFields",self.aListFields.index(sFields))
for var in self.aVariableList:
self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[var.find("(")+1:var.find(")")])])
fields=['name','model']
self.model_res = self.sock.execute(database, uid, self.password, 'ir.model', 'read', self.model_ids,fields)
if self.model_res <> []:
self.insVariable.addItem(var[:var.find("(")+1] + self.model_res[0]['name'] + ")" ,self.insVariable.getItemCount())
else:
self.insVariable.addItem(var ,self.insVariable.getItemCount())
self.win.doModalDialog("lstFields",self.sValue)
else:
ErrorDialog("Please insert user define field Field-1 or Field-4","Just go to File->Properties->User Define \nField-1 E.g. http://localhost:8069 \nOR \nField-4 E.g. account.invoice")
self.win.endExecute()
def lstbox_selected(self, oItemEvent):
try:
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
sItem= self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:var.find("(")+1]==sItem[:sItem.find("(")+1]:
sItem = var
sMain=self.aListFields[self.win.getListBoxSelectedItemPos("lstFields")]
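# Only fields whose path contains '/lang' are treated as language fields here.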
t=sMain.rfind('/lang')
if t!=-1:
sObject=self.getRes(self.sock,sItem[sItem.find("(")+1:-1],sMain[1:])
ids = self.sock.execute(database, uid, self.password, sObject , 'search', [])
res = self.sock.execute(database, uid, self.password, sObject , 'read',[ids[0]])
self.win.setEditText("txtUName",res[0][sMain[sMain.rfind("/")+1:]])
else:
ErrorDialog("Please select a language.")
except:
import traceback;traceback.print_exc()
self.win.setEditText("txtUName","TTT")
if self.bModify:
self.win.setEditText("txtUName",self.sGDisplayName)
def getRes(self, sock, sObject, sVar):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
res = sock.execute(database, uid, self.password, sObject , 'fields_get')
key = res.keys()
key.sort()
myval=None
if not sVar.find("/")==-1:
myval=sVar[:sVar.find("/")]
else:
myval=sVar
if myval in key:
if (res[myval]['type'] in ['many2one']):
sObject = res[myval]['relation']
return self.getRes(sock,res[myval]['relation'], sVar[sVar.find("/")+1:])
else:
return sObject
def cmbVariable_selected(self, oItemEvent):
if self.count > 0 :
try:
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.win.removeListBoxItems("lstFields", 0, self.win.getListBoxItemCount("lstFields"))
self.aListFields=[]
tempItem = self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:var.find("(")] == tempItem[:tempItem.find("(")]:
sItem=var
genTree(
sItem[ sItem.find("(") + 1:sItem.find(")")],
self.aListFields,
self.insField,
self.sMyHost,
2,
ending_excl=['one2many','many2one','many2many','reference'],
recur=['many2one']
)
except:
import traceback;traceback.print_exc()
def btnOk_clicked(self, oActionEvent):
self.bOkay = True
desktop=getDesktop()
doc = desktop.getCurrentComponent()
cursor = doc.getCurrentController().getViewCursor()
itemSelected = self.win.getListBoxSelectedItem( "lstFields" )
itemSelectedPos = self.win.getListBoxSelectedItemPos( "lstFields" )
txtUName = self.win.getEditText("txtUName")
sKey=u""+ txtUName
if itemSelected != "" and txtUName != "" and self.bModify==True :
oCurObj=cursor.TextField
sObjName=self.insVariable.getText()
sObjName=sObjName[:sObjName.find("(")]
sValue=u"[[ setLang" + sObjName + self.aListFields[itemSelectedPos].replace("/",".") + ")" " ]]"
oCurObj.Items = (sKey,sValue)
oCurObj.update()
self.win.endExecute()
elif itemSelected != "" and txtUName != "" :
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
sObjName=self.win.getComboBoxText("cmbVariable")
sObjName=sObjName[:sObjName.find("(")]
widget = ( cursor.TextTable and cursor.TextTable.getCellByName( cursor.Cell.CellName ) or doc.Text )
sValue = u"[[setLang" + "(" + sObjName + self.aListFields[itemSelectedPos].replace("/",".") +")" " ]]"
oInputList.Items = (sKey,sValue)
widget.insertTextContent(cursor,oInputList,False)
self.win.endExecute()
else:
ErrorDialog("Please fill appropriate data in name field \nor select particular value from the list of fields.")
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
AddLang()
elif __name__=="package":
g_ImplementationHelper.addImplementation( AddLang, "org.openoffice.openerp.report.langtag", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -783,279,875,261,735,600 | 44.126984 | 193 | 0.581428 | false |
ozzyjohnson/zaproxy | python/api/src/zapv2/users.py | 15 | 2918 | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2015 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
class users(object):
def __init__(self, zap):
self.zap = zap
def users_list(self, contextid=''):
return next(self.zap._request(self.zap.base + 'users/view/usersList/', {'contextId' : contextid}).itervalues())
def get_user_by_id(self, contextid='', userid=''):
return next(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId' : contextid, 'userId' : userid}).itervalues())
def get_authentication_credentials_config_params(self, contextid):
return next(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId' : contextid}).itervalues())
def get_authentication_credentials(self, contextid, userid):
return next(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId' : contextid, 'userId' : userid}).itervalues())
def new_user(self, contextid, name, apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId' : contextid, 'name' : name, 'apikey' : apikey}).itervalues())
def remove_user(self, contextid, userid, apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId' : contextid, 'userId' : userid, 'apikey' : apikey}).itervalues())
def set_user_enabled(self, contextid, userid, enabled, apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId' : contextid, 'userId' : userid, 'enabled' : enabled, 'apikey' : apikey}).itervalues())
def set_user_name(self, contextid, userid, name, apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId' : contextid, 'userId' : userid, 'name' : name, 'apikey' : apikey}).itervalues())
def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams='', apikey=''):
return next(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', {'contextId' : contextid, 'userId' : userid, 'authCredentialsConfigParams' : authcredentialsconfigparams, 'apikey' : apikey}).itervalues())
| apache-2.0 | -1,770,106,822,242,331,600 | 53.037037 | 239 | 0.705963 | false |
ajnirp/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_unittest.py | 171 | 22302 | from _pytest.main import EXIT_NOTESTSCOLLECTED
import pytest
def test_simple_unittest(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
def testpassing(self):
self.assertEquals('foo', 'foo')
def test_failing(self):
self.assertEquals('foo', 'bar')
""")
reprec = testdir.inline_run(testpath)
assert reprec.matchreport("testpassing").passed
assert reprec.matchreport("test_failing").failed
def test_runTest_method(testdir):
testdir.makepyfile("""
import unittest
class MyTestCaseWithRunTest(unittest.TestCase):
def runTest(self):
self.assertEquals('foo', 'foo')
class MyTestCaseWithoutRunTest(unittest.TestCase):
def runTest(self):
self.assertEquals('foo', 'foo')
def test_something(self):
pass
""")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines("""
*MyTestCaseWithRunTest::runTest*
*MyTestCaseWithoutRunTest::test_something*
*2 passed*
""")
def test_isclasscheck_issue53(testdir):
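# Regression test for issue 53: an object whose __getattr__ swallows every
# attribute lookup must not crash collection or be collected as a test class.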
testpath = testdir.makepyfile("""
import unittest
class _E(object):
def __getattr__(self, tag):
pass
E = _E()
""")
result = testdir.runpytest(testpath)
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_setup(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
def setUp(self):
self.foo = 1
def setup_method(self, method):
self.foo2 = 1
def test_both(self):
self.assertEquals(1, self.foo)
assert self.foo2 == 1
def teardown_method(self, method):
assert 0, "42"
""")
reprec = testdir.inline_run("-s", testpath)
assert reprec.matchreport("test_both", when="call").passed
rep = reprec.matchreport("test_both", when="teardown")
assert rep.failed and '42' in str(rep.longrepr)
def test_setUpModule(testdir):
testpath = testdir.makepyfile("""
l = []
def setUpModule():
l.append(1)
def tearDownModule():
del l[0]
def test_hello():
assert l == [1]
def test_world():
assert l == [1]
""")
result = testdir.runpytest(testpath)
result.stdout.fnmatch_lines([
"*2 passed*",
])
def test_setUpModule_failing_no_teardown(testdir):
testpath = testdir.makepyfile("""
l = []
def setUpModule():
0/0
def tearDownModule():
l.append(1)
def test_hello():
pass
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=0, failed=1)
call = reprec.getcalls("pytest_runtest_setup")[0]
assert not call.item.module.l
def test_new_instances(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
def test_func1(self):
self.x = 2
def test_func2(self):
assert not hasattr(self, 'x')
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=2)
def test_teardown(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
l = []
def test_one(self):
pass
def tearDown(self):
self.l.append(None)
class Second(unittest.TestCase):
def test_check(self):
self.assertEquals(MyTestCase.l, [None])
""")
reprec = testdir.inline_run(testpath)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 0, failed
assert passed == 2
assert passed + skipped + failed == 2
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue148(testdir):
testpath = testdir.makepyfile("""
import unittest
@unittest.skip("hello")
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
xxx
def test_one(self):
pass
@classmethod
def tearDownClass(self):
xxx
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(skipped=1)
def test_method_and_teardown_failing_reporting(testdir):
testdir.makepyfile("""
import unittest, pytest
class TC(unittest.TestCase):
def tearDown(self):
assert 0, "down1"
def test_method(self):
assert False, "down2"
""")
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines([
"*tearDown*",
"*assert 0*",
"*test_method*",
"*assert False*",
"*1 failed*1 error*",
])
def test_setup_failure_is_shown(testdir):
testdir.makepyfile("""
import unittest
import pytest
class TC(unittest.TestCase):
def setUp(self):
assert 0, "down1"
def test_method(self):
print ("never42")
xyz
""")
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines([
"*setUp*",
"*assert 0*down1*",
"*1 failed*",
])
assert 'never42' not in result.stdout.str()
def test_setup_setUpClass(testdir):
testpath = testdir.makepyfile("""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
x = 0
@classmethod
def setUpClass(cls):
cls.x += 1
def test_func1(self):
assert self.x == 1
def test_func2(self):
assert self.x == 1
@classmethod
def tearDownClass(cls):
cls.x -= 1
def test_teareddown():
assert MyTestCase.x == 0
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=3)
def test_setup_class(testdir):
testpath = testdir.makepyfile("""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
x = 0
def setup_class(cls):
cls.x += 1
def test_func1(self):
assert self.x == 1
def test_func2(self):
assert self.x == 1
def teardown_class(cls):
cls.x -= 1
def test_teareddown():
assert MyTestCase.x == 0
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=3)
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_adderrorandfailure_defers(testdir, type):
testdir.makepyfile("""
from unittest import TestCase
import pytest
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
try:
result.add%s(self, excinfo._excinfo)
except KeyboardInterrupt:
raise
except:
pytest.fail("add%s should not raise")
def test_hello(self):
pass
""" % (type, type))
result = testdir.runpytest()
assert 'should not raise' not in result.stdout.str()
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_custom_exception_info(testdir, type):
testdir.makepyfile("""
from unittest import TestCase
import py, pytest
import _pytest._code
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
# we fake an incompatible exception info
from _pytest.monkeypatch import monkeypatch
mp = monkeypatch()
def t(*args):
mp.undo()
raise TypeError()
mp.setattr(_pytest._code, 'ExceptionInfo', t)
try:
excinfo = excinfo._excinfo
result.add%(type)s(self, excinfo)
finally:
mp.undo()
def test_hello(self):
pass
""" % locals())
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"NOTE: Incompatible Exception Representation*",
"*ZeroDivisionError*",
"*1 failed*",
])
def test_testcase_totally_incompatible_exception_info(testdir):
item, = testdir.getitems("""
from unittest import TestCase
class MyTestCase(TestCase):
def test_hello(self):
pass
""")
item.addError(None, 42)
excinfo = item._excinfo.pop(0)
assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr())
def test_module_level_pytestmark(testdir):
testpath = testdir.makepyfile("""
import unittest
import pytest
pytestmark = pytest.mark.xfail
class MyTestCase(unittest.TestCase):
def test_func1(self):
assert 0
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testcase_skip_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
skip = 'dont run'
def test_func(self):
pass
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testfunction_skip_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
def test_func(self):
pass
test_func.skip = 'dont run'
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testcase_todo_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
todo = 'dont run'
def test_func(self):
assert 0
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testfunction_todo_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
def test_func(self):
assert 0
test_func.todo = 'dont run'
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
class TestTrialUnittest:
def setup_class(cls):
cls.ut = pytest.importorskip("twisted.trial.unittest")
def test_trial_testcase_runtest_not_collected(self, testdir):
testdir.makepyfile("""
from twisted.trial.unittest import TestCase
class TC(TestCase):
def test_hello(self):
pass
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
testdir.makepyfile("""
from twisted.trial.unittest import TestCase
class TC(TestCase):
def runTest(self):
pass
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_trial_exceptions_with_skips(self, testdir):
testdir.makepyfile("""
from twisted.trial import unittest
import pytest
class TC(unittest.TestCase):
def test_hello(self):
pytest.skip("skip_in_method")
@pytest.mark.skipif("sys.version_info != 1")
def test_hello2(self):
pass
@pytest.mark.xfail(reason="iwanto")
def test_hello3(self):
assert 0
def test_hello4(self):
pytest.xfail("i2wanto")
def test_trial_skip(self):
pass
test_trial_skip.skip = "trialselfskip"
def test_trial_todo(self):
assert 0
test_trial_todo.todo = "mytodo"
def test_trial_todo_success(self):
pass
test_trial_todo_success.todo = "mytodo"
class TC2(unittest.TestCase):
def setup_class(cls):
pytest.skip("skip_in_setup_class")
def test_method(self):
pass
""")
result = testdir.runpytest("-rxs")
assert result.ret == 0
result.stdout.fnmatch_lines_random([
"*XFAIL*test_trial_todo*",
"*trialselfskip*",
"*skip_in_setup_class*",
"*iwanto*",
"*i2wanto*",
"*sys.version_info*",
"*skip_in_method*",
"*4 skipped*3 xfail*1 xpass*",
])
def test_trial_error(self, testdir):
testdir.makepyfile("""
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet import reactor
class TC(TestCase):
def test_one(self):
crash
def test_two(self):
def f(_):
crash
d = Deferred()
d.addCallback(f)
reactor.callLater(0.3, d.callback, None)
return d
def test_three(self):
def f():
pass # will never get called
reactor.callLater(0.3, f)
# will crash at teardown
def test_four(self):
def f(_):
reactor.callLater(0.3, f)
crash
d = Deferred()
d.addCallback(f)
reactor.callLater(0.3, d.callback, None)
return d
# will crash both at test time and at teardown
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*ERRORS*",
"*DelayedCalls*",
"*test_four*",
"*NameError*crash*",
"*test_one*",
"*NameError*crash*",
"*test_three*",
"*DelayedCalls*",
"*test_two*",
"*crash*",
])
def test_trial_pdb(self, testdir):
p = testdir.makepyfile("""
from twisted.trial import unittest
import pytest
class TC(unittest.TestCase):
def test_hello(self):
assert 0, "hellopdb"
""")
child = testdir.spawn_pytest(p)
child.expect("hellopdb")
child.sendeof()
def test_djangolike_testcase(testdir):
# contributed by Morten Breekevold
testdir.makepyfile("""
from unittest import TestCase, main
class DjangoLikeTestCase(TestCase):
def setUp(self):
print ("setUp()")
def test_presetup_has_been_run(self):
print ("test_thing()")
self.assertTrue(hasattr(self, 'was_presetup'))
def tearDown(self):
print ("tearDown()")
def __call__(self, result=None):
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
super(DjangoLikeTestCase, self).__call__(result)
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
print ("_pre_setup()")
self.was_presetup = True
def _post_teardown(self):
print ("_post_teardown()")
""")
result = testdir.runpytest("-s")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*_pre_setup()*",
"*setUp()*",
"*test_thing()*",
"*tearDown()*",
"*_post_teardown()*",
])
def test_unittest_not_shown_in_traceback(testdir):
testdir.makepyfile("""
import unittest
class t(unittest.TestCase):
def test_hello(self):
x = 3
self.assertEquals(x, 4)
""")
res = testdir.runpytest()
assert "failUnlessEqual" not in res.stdout.str()
def test_unorderable_types(testdir):
testdir.makepyfile("""
import unittest
class TestJoinEmpty(unittest.TestCase):
pass
def make_test():
class Test(unittest.TestCase):
pass
Test.__name__ = "TestFoo"
return Test
TestFoo = make_test()
""")
result = testdir.runpytest()
assert "TypeError" not in result.stdout.str()
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_unittest_typerror_traceback(testdir):
testdir.makepyfile("""
import unittest
class TestJoinEmpty(unittest.TestCase):
def test_hello(self, arg1):
pass
""")
result = testdir.runpytest()
assert "TypeError" in result.stdout.str()
assert result.ret == 1
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_unexpected_failure(testdir):
testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.expectedFailure
def test_func1(self):
assert 0
@unittest.expectedFailure
def test_func2(self):
assert 1
""")
result = testdir.runpytest("-rxX")
result.stdout.fnmatch_lines([
"*XFAIL*MyTestCase*test_func1*",
"*XPASS*MyTestCase*test_func2*",
"*1 xfailed*1 xpass*",
])
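# The same test body runs for both fixture styles: plain 'fixture' (return)
# and 'yield_fixture' (yield).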
@pytest.mark.parametrize('fix_type, stmt', [
('fixture', 'return'),
('yield_fixture', 'yield'),
])
def test_unittest_setup_interaction(testdir, fix_type, stmt):
testdir.makepyfile("""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
@pytest.{fix_type}(scope="class", autouse=True)
def perclass(self, request):
request.cls.hello = "world"
{stmt}
@pytest.{fix_type}(scope="function", autouse=True)
def perfunction(self, request):
request.instance.funcname = request.function.__name__
{stmt}
def test_method1(self):
assert self.funcname == "test_method1"
assert self.hello == "world"
def test_method2(self):
assert self.funcname == "test_method2"
def test_classattr(self):
assert self.__class__.hello == "world"
""".format(fix_type=fix_type, stmt=stmt))
result = testdir.runpytest()
result.stdout.fnmatch_lines("*3 passed*")
def test_non_unittest_no_setupclass_support(testdir):
testpath = testdir.makepyfile("""
class TestFoo:
x = 0
@classmethod
def setUpClass(cls):
cls.x = 1
def test_method1(self):
assert self.x == 0
@classmethod
def tearDownClass(cls):
cls.x = 1
def test_not_teareddown():
assert TestFoo.x == 0
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=2)
def test_no_teardown_if_setupclass_failed(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
x = 0
@classmethod
def setUpClass(cls):
cls.x = 1
assert False
def test_func1(self):
cls.x = 10
@classmethod
def tearDownClass(cls):
cls.x = 100
def test_notTornDown():
assert MyTestCase.x == 1
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1, failed=1)
def test_issue333_result_clearing(testdir):
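# Regression test for issue #333: a conftest hook that fails after running the
# test call must not wipe out the test's own failure report.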
testdir.makeconftest("""
def pytest_runtest_call(__multicall__, item):
__multicall__.execute()
assert 0
""")
testdir.makepyfile("""
import unittest
class TestIt(unittest.TestCase):
def test_func(self):
0/0
""")
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1)
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_raise_skip_issue748(testdir):
testdir.makepyfile(test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
def test_one(self):
raise unittest.SkipTest('skipping due to reasons')
""")
result = testdir.runpytest("-v", '-rs')
result.stdout.fnmatch_lines("""
*SKIP*[1]*test_foo.py*skipping due to reasons*
*1 skipped*
""")
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue1169(testdir):
testdir.makepyfile(test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.skip("skipping due to reasons")
def test_skip(self):
self.fail()
""")
result = testdir.runpytest("-v", '-rs')
result.stdout.fnmatch_lines("""
*SKIP*[1]*skipping due to reasons*
*1 skipped*
""")
| mpl-2.0 | -667,679,839,511,862,700 | 29.260516 | 71 | 0.533002 | false |
blahgeek/xiami_downloader | lib/track.py | 1 | 2132 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by [email protected] at 2014-04-18
import os
from .decrypt import decrypt
from .http import download
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3, APIC
class Track(object):
''' A song '''
def __init__(self, tag):
''' @tag: a BeautifulSoup Tag representing a track'''
keys = ('title', 'album_name', 'artist')
values = map(lambda x: tag.find(name=x).text, keys)
self.id3 = dict(zip(keys, values))
geturl = lambda key: tag.find(name=key).text.strip()
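# The <location> value served by xiami is obfuscated; decrypt() decodes it
# into the actual download URL.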
self.url = decrypt(geturl('location'))
self.picurl = geturl('pic')
self.lyricurl = geturl('lyric')
if not self.lyricurl.endswith('.lrc'):
self.lyricurl = None
self.filename = self.id3['artist'] + ' - ' + self.id3['title'] + '.mp3'
def download(self, save_path, progress_bar):
self.filename = download(self.url, save_path, self.filename, progress_bar)
return self.filename
def download_lyric(self, save_path, progress_bar):
if self.lyricurl is None:
progress_bar.msg('No lyric available.\n')
return
progress_bar.msg('Downloading lyric...')
download(self.lyricurl, save_path, self.filename, None)
progress_bar.msg('Done.\n')
def patch_id3(self, save_path, progress_bar):
filepath = os.path.join(save_path, self.filename).encode('utf8')
progress_bar.msg('Patching ID3...')
cover_f, mime_type = download(self.picurl)
easyid3 = EasyID3()
easyid3['title'] = self.id3['title']
easyid3['album'] = self.id3['album_name']
easyid3['artist'] = self.id3['artist'].split(';')
easyid3['performer'] = easyid3['artist'][0]
easyid3.save(filepath)
harid3 = ID3(filepath)
harid3.add(APIC(
encoding = 3,
mime = mime_type,
type = 3,
desc = 'Cover picture from xiami.com fetched by BlahGeek.',
data = cover_f.read()))
harid3.save()
progress_bar.msg('Done.\n')
| mit | -6,433,817,167,316,788,000 | 35.758621 | 82 | 0.583959 | false |
cryptickp/libcloud | libcloud/test/compute/test_types.py | 26 | 1969 | from unittest import TestCase
from libcloud.compute.types import Provider, NodeState, StorageVolumeState, \
VolumeSnapshotState, Type
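# Minimal Type subclass used below to exercise the generic
# tostring/fromstring helpers.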
class TestType(Type):
INUSE = "inuse"
class TestTestType(TestCase):
model = TestType
attribute = TestType.INUSE
def test_provider_tostring(self):
self.assertEqual(Provider.tostring(TestType.INUSE), "INUSE")
def test_provider_fromstring(self):
self.assertEqual(TestType.fromstring("inuse"), TestType.INUSE)
def test_provider_fromstring_caseinsensitive(self):
self.assertEqual(TestType.fromstring("INUSE"), TestType.INUSE)
class TestProvider(TestCase):
def test_provider_tostring(self):
self.assertEqual(Provider.tostring(Provider.RACKSPACE), "RACKSPACE")
def test_provider_fromstring(self):
self.assertEqual(Provider.fromstring("rackspace"), Provider.RACKSPACE)
class TestNodeState(TestCase):
def test_nodestate_tostring(self):
self.assertEqual(NodeState.tostring(NodeState.RUNNING), "RUNNING")
def test_nodestate_fromstring(self):
self.assertEqual(NodeState.fromstring("running"), NodeState.RUNNING)
class TestStorageVolumeState(TestCase):
def test_storagevolumestate_tostring(self):
self.assertEqual(
StorageVolumeState.tostring(StorageVolumeState.AVAILABLE),
"AVAILABLE"
)
def test_storagevolumestate_fromstring(self):
self.assertEqual(
StorageVolumeState.fromstring("available"),
StorageVolumeState.AVAILABLE
)
class TestVolumeSnapshotState(TestCase):
def test_volumesnapshotstate_tostring(self):
self.assertEqual(
VolumeSnapshotState.tostring(VolumeSnapshotState.AVAILABLE),
"AVAILABLE"
)
def test_volumesnapshotstate_fromstring(self):
self.assertEqual(
VolumeSnapshotState.fromstring("available"),
VolumeSnapshotState.AVAILABLE
)
| apache-2.0 | 4,908,080,004,619,732,000 | 27.128571 | 78 | 0.703911 | false |
lmprice/ansible | lib/ansible/modules/web_infrastructure/htpasswd.py | 48 | 8684 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Nimbis Services, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: htpasswd
version_added: "1.3"
short_description: manage user files for basic authentication
description:
- Add and remove username/password entries in a password file using htpasswd.
- This is used by web servers such as Apache and Nginx for basic authentication.
options:
path:
required: true
aliases: [ dest, destfile ]
description:
- Path to the file that contains the usernames and passwords
name:
required: true
aliases: [ username ]
description:
- User name to add or remove
password:
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
crypt_scheme:
required: false
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt"
description:
- Encryption scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
md5_crypt and sha256_crypt, which are linux passwd hashes. If you
do so the password file will not be compatible with Apache or Nginx
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the user entry should be present or not
create:
required: false
type: bool
default: "yes"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. If set to "no", will fail if the
file does not exist
notes:
- "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requirements: [ passlib>=1.6 ]
author: "Ansible Core Team"
extends_documentation_fragment: files
"""
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
- htpasswd:
path: /etc/nginx/passwdfile
name: janedoe
password: '9s36?;fyNp'
owner: root
group: www-data
mode: 0640
# Remove a user from a password file
- htpasswd:
path: /etc/apache2/passwdfile
name: foobar
state: absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd:
path: /etc/mail/passwords
name: alex
password: oedu2eGh
crypt_scheme: md5_crypt
"""
import os
import tempfile
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
import passlib
except ImportError:
passlib_installed = False
else:
passlib_installed = True
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
def create_missing_directories(dest):
destpath = os.path.dirname(dest)
if not os.path.exists(destpath):
os.makedirs(destpath)
def present(dest, username, password, crypt_scheme, create, check_mode):
""" Ensures user is present
Returns (msg, changed) """
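# Schemes other than the built-in Apache ones get a passlib CryptContext that
# also includes the Apache hashes, so entries already stored with those hashes
# can still be verified.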
if crypt_scheme in apache_hashes:
context = htpasswd_context
else:
context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
if not os.path.exists(dest):
if not create:
raise ValueError('Destination %s does not exist' % dest)
if check_mode:
return ("Create %s" % dest, True)
create_missing_directories(dest)
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Created %s and added %s" % (dest, username), True)
else:
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
found = None
if getattr(ht, 'check_password', None):
found = ht.check_password(username, password)
else:
found = ht.verify(username, password)
if found:
return ("%s already present" % username, False)
else:
if not check_mode:
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Add/update %s" % username, True)
def absent(dest, username, check_mode):
""" Ensures user is absent
Returns (msg, changed) """
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False)
else:
ht = HtpasswdFile(dest)
if username not in ht.users():
return ("%s not present" % username, False)
else:
if not check_mode:
ht.delete(username)
ht.save()
return ("Remove %s" % username, True)
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
arg_spec = dict(
path=dict(required=True, aliases=["dest", "destfile"]),
name=dict(required=True, aliases=["username"]),
password=dict(required=False, default=None, no_log=True),
crypt_scheme=dict(required=False, default="apr_md5_crypt"),
state=dict(required=False, default="present"),
create=dict(type='bool', default='yes'),
)
module = AnsibleModule(argument_spec=arg_spec,
add_file_common_args=True,
supports_check_mode=True)
path = module.params['path']
username = module.params['name']
password = module.params['password']
crypt_scheme = module.params['crypt_scheme']
state = module.params['state']
create = module.params['create']
check_mode = module.check_mode
if not passlib_installed:
module.fail_json(msg="This module requires the passlib Python library")
# Check file for blank lines in effort to avoid "need more than 1 value to unpack" error.
try:
f = open(path, "r")
except IOError:
# No preexisting file to remove blank lines from
f = None
else:
try:
lines = f.readlines()
finally:
f.close()
# If the file gets edited, it returns true, so only edit the file if it has blank lines
strip = False
for line in lines:
if not line.strip():
strip = True
break
if strip:
# If check mode, create a temporary file
if check_mode:
temp = tempfile.NamedTemporaryFile()
path = temp.name
f = open(path, "w")
try:
[f.write(line) for line in lines if line.strip()]
finally:
f.close()
try:
if state == 'present':
(msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
elif state == 'absent':
if not os.path.exists(path):
module.exit_json(msg="%s not present" % username,
warnings="%s does not exist" % path, changed=False)
(msg, changed) = absent(path, username, check_mode)
else:
module.fail_json(msg="Invalid state: %s" % state)
check_file_attrs(module, changed, msg)
module.exit_json(msg=msg, changed=changed)
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 | -5,869,652,246,539,103,000 | 31.04428 | 110 | 0.616191 | false |
nephila/cmsplugin-filer | cmsplugin_filer_teaser/south_migrations/0002_add_teaser_style_choice.py | 11 | 14099 |
from south.db import db
from django.db import models
from cmsplugin_filer_teaser.models import *
from cmsplugin_filer_utils.migration import rename_tables_new_to_old
class Migration:
cms_plugin_table_mapping = (
# (old_name, new_name),
('cmsplugin_filerteaser', 'cmsplugin_filer_teaser_filerteaser'),
)
def forwards(self, orm):
rename_tables_new_to_old(db, self.cms_plugin_table_mapping)
# Adding field 'FilerTeaser.style'
db.add_column('cmsplugin_filerteaser', 'style', orm['cmsplugin_filer_teaser.filerteaser:style'])
def backwards(self, orm):
rename_tables_new_to_old(db, self.cms_plugin_table_mapping)
# Deleting field 'FilerTeaser.style'
db.delete_column('cmsplugin_filerteaser', 'style')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'menu_login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cmsplugin_filer_teaser.filerteaser': {
'Meta': {'db_table': "'cmsplugin_filerteaser'"},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'free_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'use_autoscale': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'file_field': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'unique_together': "(('parent', 'name'),)"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cmsplugin_filer_teaser']
| bsd-3-clause | -5,500,905,532,602,201,000 | 83.42515 | 189 | 0.556635 | false |
EKiefer/edge-starter | py34env/Lib/site-packages/pip/utils/outdated.py | 513 | 5455 | from __future__ import absolute_import
import datetime
import json
import logging
import os.path
import sys
from pip._vendor import lockfile
from pip._vendor.packaging import version as packaging_version
from pip.compat import total_seconds, WINDOWS
from pip.models import PyPI
from pip.locations import USER_CACHE_DIR, running_under_virtualenv
from pip.utils import ensure_dir, get_installed_version
from pip.utils.filesystem import check_path_owner
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
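# The last-check timestamp and latest PyPI version are cached either
# per-virtualenv (pip-selfcheck.json in sys.prefix) or in the user cache dir,
# keyed by sys.prefix.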
class VirtualenvSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)
except (IOError, ValueError):
self.state = {}
def save(self, pypi_version, current_time):
# Attempt to write out our version check file
with open(self.statefile_path, "w") as statefile:
json.dump(
{
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
},
statefile,
sort_keys=True,
separators=(",", ":")
)
class GlobalSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)[sys.prefix]
except (IOError, ValueError, KeyError):
self.state = {}
def save(self, pypi_version, current_time):
# Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
# Now that we've ensured the directory is owned by this user, we'll go
# ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
if os.path.exists(self.statefile_path):
with open(self.statefile_path) as statefile:
state = json.load(statefile)
else:
state = {}
state[sys.prefix] = {
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
with open(self.statefile_path, "w") as statefile:
json.dump(state, statefile, sort_keys=True,
separators=(",", ":"))
def load_selfcheck_statefile():
if running_under_virtualenv():
return VirtualenvSelfCheckState()
else:
return GlobalSelfCheckState()
def pip_version_check(session):
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_version = get_installed_version("pip")
if installed_version is None:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = load_selfcheck_statefile()
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
resp = session.get(
PyPI.pip_json_url,
headers={"Accept": "application/json"},
)
resp.raise_for_status()
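# Pick the newest non-prerelease release from the PyPI JSON "releases" keys;
# the list is sorted by version, so [-1] is the latest.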
pypi_version = [
v for v in sorted(
list(resp.json()["releases"]),
key=packaging_version.parse,
)
if not packaging_version.parse(v).is_prerelease
][-1]
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
# Determine if our pypi_version is older
if (pip_version < remote_version and
pip_version.base_version != remote_version.base_version):
# Advise "python -m pip" on Windows to avoid issues
# with overwriting pip.exe.
if WINDOWS:
pip_cmd = "python -m pip"
else:
pip_cmd = "pip"
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command.",
pip_version, pypi_version, pip_cmd
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
| mit | 1,773,374,036,934,296,800 | 32.67284 | 78 | 0.580202 | false |
tyndare/osmose-backend | analysers/analyser_merge_bicycle_rental_FR_bm.py | 1 | 3368 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2014 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from Analyser_Merge import Analyser_Merge, Source, SHP, Load, Mapping, Select, Generate
class Analyser_Merge_Bicycle_Rental_FR_bm(Analyser_Merge):
def __init__(self, config, logger = None):
self.missing_official = {"item":"8160", "class": 1, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental not integrated") }
self.possible_merge = {"item":"8161", "class": 3, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental integration suggestion") }
self.update_official = {"item":"8162", "class": 4, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle update") }
Analyser_Merge.__init__(self, config, logger,
"http://data.bordeaux-metropole.fr/data.php?themes=10",
u"Station VCUB",
SHP(Source(attribution = u"Bordeaux Métropole", millesime = "08/2016",
fileUrl = "http://data.bordeaux-metropole.fr/files.php?gid=43&format=2", zip = "TB_STVEL_P.shp", encoding = "ISO-8859-15")),
Load(("ST_X(geom)",), ("ST_Y(geom)",), srid = 2154),
Mapping(
select = Select(
types = ["nodes"],
tags = {"amenity": "bicycle_rental"}),
osmRef = "ref",
conflationDistance = 100,
generate = Generate(
static1 = {
"amenity": "bicycle_rental",
"network": "VCUB"},
static2 = {"source": self.source},
mapping1 = {
"name": "NOM",
"ref": "NUMSTAT",
"capacity": "NBSUPPOR",
"vending": lambda res: "subscription" if res["TERMBANC"] == "OUI" else None,
"description": lambda res: "VCUB+" if res["TARIF"] == "VLS PLUS" else None} )))
| gpl-3.0 | -5,211,930,841,178,215,000 | 62.490566 | 176 | 0.455572 | false |
fabioz/Pydev | plugins/org.python.pydev.jython/Lib/sysconfig.py | 13 | 26368 | """Provide access to Python's configuration information.
"""
import sys
import os
from os.path import pardir, realpath
_INSTALL_SCHEMES = {
'posix_prefix': {
'stdlib': '{base}/lib/python{py_version_short}',
'platstdlib': '{platbase}/lib/python{py_version_short}',
'purelib': '{base}/lib/python{py_version_short}/site-packages',
'platlib': '{platbase}/lib/python{py_version_short}/site-packages',
'include': '{base}/include/python{py_version_short}',
'platinclude': '{platbase}/include/python{py_version_short}',
'scripts': '{base}/bin',
'data': '{base}',
},
'posix_home': {
'stdlib': '{base}/lib/python',
'platstdlib': '{base}/lib/python',
'purelib': '{base}/lib/python',
'platlib': '{base}/lib/python',
'include': '{base}/include/python',
'platinclude': '{base}/include/python',
'scripts': '{base}/bin',
'data' : '{base}',
},
'nt': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
'data' : '{base}',
},
'os2': {
'stdlib': '{base}/Lib',
'platstdlib': '{base}/Lib',
'purelib': '{base}/Lib/site-packages',
'platlib': '{base}/Lib/site-packages',
'include': '{base}/Include',
'platinclude': '{base}/Include',
'scripts': '{base}/Scripts',
'data' : '{base}',
},
'os2_home': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'nt_user': {
'stdlib': '{userbase}/Python{py_version_nodot}',
'platstdlib': '{userbase}/Python{py_version_nodot}',
'purelib': '{userbase}/Python{py_version_nodot}/site-packages',
'platlib': '{userbase}/Python{py_version_nodot}/site-packages',
'include': '{userbase}/Python{py_version_nodot}/Include',
'scripts': '{userbase}/Scripts',
'data' : '{userbase}',
},
'posix_user': {
'stdlib': '{userbase}/lib/python{py_version_short}',
'platstdlib': '{userbase}/lib/python{py_version_short}',
'purelib': '{userbase}/lib/python{py_version_short}/site-packages',
'platlib': '{userbase}/lib/python{py_version_short}/site-packages',
'include': '{userbase}/include/python{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'osx_framework_user': {
'stdlib': '{userbase}/lib/python',
'platstdlib': '{userbase}/lib/python',
'purelib': '{userbase}/lib/python/site-packages',
'platlib': '{userbase}/lib/python/site-packages',
'include': '{userbase}/include',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
'java': {
'stdlib': '{base}/lib/jython',
'platstdlib': '{base}/lib/jython',
'purelib': '{base}/lib/jython',
'platlib': '{base}/lib/jython',
'include': '{base}/include/jython',
'platinclude': '{base}/include/jython',
'scripts': '{base}/bin',
'data' : '{base}',
},
'java_user': {
'stdlib': '{userbase}/lib/jython{py_version_short}',
'platstdlib': '{userbase}/lib/jython{py_version_short}',
'purelib': '{userbase}/lib/jython{py_version_short}/site-packages',
'platlib': '{userbase}/lib/jython{py_version_short}/site-packages',
'include': '{userbase}/include/jython{py_version_short}',
'scripts': '{userbase}/bin',
'data' : '{userbase}',
},
}
_SCHEME_KEYS = ('stdlib', 'platstdlib', 'purelib', 'platlib', 'include',
'scripts', 'data')
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_INSTALL_SCHEMES[scheme]['include'] = '{projectbase}/Include'
_INSTALL_SCHEMES[scheme]['platinclude'] = '{srcdir}'
def _subst_vars(s, local_vars):
try:
return s.format(**local_vars)
except KeyError:
try:
return s.format(**os.environ)
except KeyError, var:
raise AttributeError('{%s}' % var)
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _INSTALL_SCHEMES[scheme].items():
if os.name in ('posix', 'nt', 'java'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
return env_base if env_base else joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
return env_base if env_base else \
joinuser("~", "Library", framework, "%d.%d"
% (sys.version_info[:2]))
return env_base if env_base else joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
import re
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with open(filename) as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
while notdone:
for name in notdone.keys():
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
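# Illustrative example (added for clarity; not part of the original module).
# Given a makefile fragment such as:
#
#     prefix = /usr/local
#     bindir = $(prefix)/bin
#
# _parse_makefile() returns {'prefix': '/usr/local', 'bindir': '/usr/local/bin'},
# resolving $(VAR) / ${VAR} references and falling back to os.environ for
# names it cannot resolve locally.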
def _get_makefile_filename():
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
return os.path.join(get_path('platstdlib'), "config", "Makefile")
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = _get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError, e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError, e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
import re
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
def get_config_h_filename():
"""Returns the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Returns a tuple containing the schemes names."""
schemes = _INSTALL_SCHEMES.keys()
schemes.sort()
return tuple(schemes)
def get_path_names():
"""Returns a tuple containing the paths names."""
return _SCHEME_KEYS
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Returns a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
if expand:
return _expand_vars(scheme, vars)
else:
return _INSTALL_SCHEMES[scheme]
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Returns a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
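# Illustrative usage (added for clarity; not part of the original module).
# Passing explicit ``vars`` makes the result deterministic:
#
#     >>> get_path('scripts', 'posix_prefix', vars={'base': '/usr/local'})
#     '/usr/local/bin'
#     >>> get_paths('posix_home', vars={'base': '/opt/py'})['scripts']
#     '/opt/py/bin'
#
# Without ``vars`` the values come from the running interpreter's prefixes.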
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
import re
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
            # compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
m = re.search('-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
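# Illustrative usage (added for clarity; not part of the original module).
# Returned values depend on the interpreter and platform:
#
#     >>> get_config_var('py_version_short')        # e.g. '2.7'
#     >>> get_config_vars('prefix', 'exec_prefix')  # e.g. ['/usr', '/usr']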
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
import re
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
            # We can't use "platform.architecture()[0]" because of a
            # bootstrap problem. We use a dict to get an error
            # if something suspicious happens.
bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
machine += ".%s" % bitness[sys.maxint]
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if 1:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(
r'<key>ProductUserVisibleVersion</key>\s*' +
r'<string>(.*?)</string>', f.read())
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
finally:
f.close()
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if (macrelease + '.') >= '10.4.' and \
'-arch' in get_config_vars().get('CFLAGS', '').strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r"%(archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxint >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxint >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
| epl-1.0 | -9,214,944,933,696,906,000 | 35.981767 | 79 | 0.537128 | false |
Endika/django | tests/many_to_one/tests.py | 15 | 29129 | import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.db.utils import IntegrityError
from django.test import TestCase
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.translation import ugettext_lazy
from .models import (
Article, Category, Child, City, District, First, Parent, Record, Relation,
Reporter, School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
def setUp(self):
# Create a few Reporters.
self.r = Reporter(first_name='John', last_name='Smith', email='[email protected]')
self.r.save()
self.r2 = Reporter(first_name='Paul', last_name='Jones', email='[email protected]')
self.r2.save()
# Create an Article.
self.a = Article(headline="This is a test", pub_date=datetime.date(2005, 7, 27), reporter=self.r)
self.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
# These are strings instead of unicode strings because that's what was used in
# the creation of this reporter (and we haven't refreshed the data from the
# database, which always returns unicode strings).
self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(headline="Fourth article", pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>"]
)
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with six.assertRaisesRegex(self, TypeError,
"'Article' instance expected, got <Reporter.*"):
self.r.article_set.add(self.r2)
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
def test_set(self):
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(
self.r2.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>"]
)
# Funny case - because the ForeignKey cannot be null,
# existing members of the set must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
def test_reverse_assignment_deprecation(self):
msg = (
"Direct assignment to the reverse side of a related set is "
"deprecated due to the implicit save() that happens. Use "
"article_set.set() instead."
)
with self.assertRaisesMessage(RemovedInDjango20Warning, msg):
self.r2.article_set = []
def test_assign(self):
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again using set() method.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(
self.r2.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>"]
)
# Because the ForeignKey cannot be null, existing members of the set
# must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Reporter cannot be null - there should not be a clear or remove method
self.assertFalse(hasattr(self.r2.article_set, 'remove'))
self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_selects(self):
self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'), ["<Article: This is a test>"])
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id), ["<Article: This is a test>"])
self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id), ["<Article: This is a test>"])
# Query on an article property
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'), ["<Article: This is a test>"])
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Check that implied __exact also works
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name='John'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Query twice over the related field.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# The underlying query only makes one join when a related table is referenced twice.
queryset = Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith')
self.assertNumQueries(1, list, queryset)
self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
# The automatically joined table has a predictable name.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John').extra(
where=["many_to_one_reporter.last_name='Smith'"]),
["<Article: John's second story>", "<Article: This is a test>"]
)
# ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
self.assertQuerysetEqual(
(Article.objects
.filter(reporter__first_name__exact='John')
.extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertQuerysetEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__pk=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertQuerysetEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
).distinct(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_reverse_selects(self):
a3 = Article.objects.create(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
john_smith = ["<Reporter: John Smith>"]
# Reporters can be queried
self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'), john_smith)
# Reporters can query in opposite direction of ForeignKey definition
self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a, a3]).distinct(), john_smith)
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T'),
["<Reporter: John Smith>", "<Reporter: John Smith>"],
ordered=False
)
self.assertQuerysetEqual(Reporter.objects.filter(article__headline__startswith='T').distinct(), john_smith)
# Counting in the opposite direction works in conjunction with distinct()
self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').count(), 2)
self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
# Queries can go round in circles.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John'),
[
"<Reporter: John Smith>",
"<Reporter: John Smith>",
"<Reporter: John Smith>",
],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
john_smith
)
self.assertQuerysetEqual(Reporter.objects.filter(article__reporter__exact=self.r).distinct(), john_smith)
# Check that implied __exact also works.
self.assertQuerysetEqual(Reporter.objects.filter(article__reporter=self.r).distinct(), john_smith)
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
qs = Article.objects.filter(
reporter=self.r,
).distinct().order_by().values('reporter__first_name', 'reporter__last_name')
self.assertEqual([d], list(qs))
def test_select_related(self):
# Check that Article.objects.select_related().dates() works properly when
# there are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='[email protected]')
r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='[email protected]')
Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'day')),
[datetime.date(1980, 4, 23), datetime.date(2005, 7, 27)]
)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'month')),
[datetime.date(1980, 4, 1), datetime.date(2005, 7, 1)]
)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'year')),
[datetime.date(1980, 1, 1), datetime.date(2005, 1, 1)]
)
def test_delete(self):
self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
Article.objects.create(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=str(self.r.id),
)
# If you delete a reporter, his articles will be deleted.
self.assertQuerysetEqual(
Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: Third article>",
"<Article: This is a test>",
]
)
self.assertQuerysetEqual(
Reporter.objects.order_by('first_name'),
["<Reporter: John Smith>", "<Reporter: Paul Jones>"]
)
self.r2.delete()
self.assertQuerysetEqual(
Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Third article>",
"<Article: This is a test>",
]
)
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'), ["<Reporter: John Smith>"])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith='This').delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertQuerysetEqual(Article.objects.all(), [])
def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(
headline="John's second test",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r.id,
)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertQuerysetEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
["<Article: John's second test>", "<Article: This is a test>"]
)
# Create an Article by Paul for the same date.
a3 = Article.objects.create(
headline="Paul's commentary",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r2.id,
)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
with self.assertRaises(MultipleObjectsReturned):
Article.objects.get(reporter_id=self.r.id)
self.assertEqual(
repr(a3),
repr(Article.objects.get(reporter_id=self.r2.id, pub_date=datetime.date(2011, 5, 7)))
)
def test_deepcopy_and_circular_references(self):
# Regression for #12876 -- Model methods that include queries that
        # recurse don't cause recursion depth problems under deepcopy.
self.r.cached_query = Article.objects.filter(reporter=self.r)
self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
def test_manager_class_caching(self):
r1 = Reporter.objects.create(first_name='Mike')
r2 = Reporter.objects.create(first_name='John')
# Same twice
self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
# Same as each other
self.assertIs(r1.article_set.__class__, r2.article_set.__class__)
def test_create_relation_with_ugettext_lazy(self):
reporter = Reporter.objects.create(first_name='John', last_name='Smith', email='[email protected]')
lazy = ugettext_lazy('test')
reporter.article_set.create(headline=lazy, pub_date=datetime.date(2011, 6, 10))
notlazy = six.text_type(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
reporter_fields = ', '.join(sorted(f.name for f in Reporter._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % reporter_fields):
Article.objects.values_list('reporter__notafield')
article_fields = ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % article_fields):
Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list('notafield')
def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
# Accessing the related object again returns the exactly same object.
self.assertIs(c.parent, p)
# But if we kill the cache, we get a new object.
del c._parent_cache
self.assertIsNot(c.parent, p)
# Assigning a new object results in that object getting cached immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertIs(c.parent, p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertIsNone(p.bestchild)
# bestchild should still be None after saving.
p.save()
self.assertIsNone(p.bestchild)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertIsNone(p.bestchild)
# Assigning None will not fail: Child.parent is null=False.
setattr(c, "parent", None)
# You also can't assign an object of the wrong type here
with self.assertRaises(ValueError):
setattr(c, "parent", First(id=1, second=1))
# You can assign None to Child.parent during object creation.
Child(name='xyzzy', parent=None)
# But when trying to save a Child with parent=None, the database will
# raise IntegrityError.
with self.assertRaises(IntegrityError), transaction.atomic():
Child.objects.create(name='xyzzy', parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertIs(c.parent, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(parent=p)
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
ToFieldChild.objects.create(parent=p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertIsNot(c.parent, p)
self.assertEqual(c.parent, p)
def test_fk_to_bigautofield(self):
ch = City.objects.create(name='Chicago')
District.objects.create(city=ch, name='Far South')
District.objects.create(city=ch, name='North')
ny = City.objects.create(name='New York', id=2 ** 33)
District.objects.create(city=ny, name='Brooklyn')
District.objects.create(city=ny, name='Manhattan')
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name='First')
c2 = Category.objects.create(name='Second')
c3 = Category.objects.create(name='Third')
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
Relation.objects.create(left=r1, right=r2)
Relation.objects.create(left=r3, right=r4)
Relation.objects.create(left=r1, right=r3)
Relation.objects.create(left=r5, right=r2)
Relation.objects.create(left=r3, right=r2)
q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
with self.assertRaises(ValueError):
Child.objects.create(name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
# Regression for #12190 -- Should be able to instantiate a FK outside
# of a model, and interrogate its related field.
cat = models.ForeignKey(Category, models.CASCADE)
self.assertEqual('id', cat.remote_field.get_related_field().name)
def test_relation_unsaved(self):
# Test that the <field>_set manager does not join on Null value fields (#17541)
Third.objects.create(name='Third 1')
Third.objects.create(name='Third 2')
th = Third(name="testing")
        # The object isn't saved and thus the relation field is null - we won't even
# execute a query in this case.
with self.assertNumQueries(0):
self.assertEqual(th.child_set.count(), 0)
th.save()
        # Now the model is saved, so we will need to execute a query.
with self.assertNumQueries(1):
self.assertEqual(th.child_set.count(), 0)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_student = Student.objects.create(school=public_school)
private_school = School.objects.create(is_public=False)
private_student = Student.objects.create(school=private_school)
# Only one school is available via all() due to the custom default manager.
self.assertQuerysetEqual(School.objects.all(), ["<School: School object>"])
self.assertEqual(public_student.school, public_school)
        # Make sure the base manager is used so that a student can still access
# its related school even if the default manager doesn't normally
# allow it.
self.assertEqual(private_student.school, private_school)
# If the manager is marked "use_for_related_fields", it'll get used instead
# of the "bare" queryset. Usually you'd define this as a property on the class,
# but this approximates that in a way that's easier in tests.
School.objects.use_for_related_fields = True
try:
private_student = Student.objects.get(pk=private_student.pk)
with self.assertRaises(School.DoesNotExist):
private_student.school
finally:
School.objects.use_for_related_fields = False
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(Article(), 'reporter'))
| bsd-3-clause | -956,513,722,556,745,300 | 46.364228 | 119 | 0.623468 | false |
TangibleDisplay/garden-ddd | view.py | 4 | 7639 | from object_renderer import ObjectRenderer
from kivy.core.window import Window # noqa
from kivy.lang import Builder
from kivy.properties import NumericProperty, BooleanProperty
from kivy.clock import Clock
from kivy.animation import Animation
from math import cos, sin, pi
def dist(p1, p2):
return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** .5
class MultitouchCamera(object):
def __init__(self, **kwargs):
super(MultitouchCamera, self).__init__(**kwargs)
self.touches = []
self.touches_center = []
self.touches_dist = 0
Clock.schedule_interval(self.update_cam, 0)
def on_touch_down(self, touch):
        if super(MultitouchCamera, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos):
self.touches.append(touch)
touch.grab(self)
if len(self.touches) > 1:
self.touches_center = self.get_center()
self.touches_dist = self.get_dist(self.touches_center)
def update_cam(self, dt):
if not self.touches:
return
elif len(self.touches) == 1:
self.cam_translation[0] += self.touches[0].dx / 100.
self.cam_translation[1] += self.touches[0].dy / 100.
else:
c = self.get_center()
d = self.get_dist(c)
self.cam_rotation[1] += (c[0] - self.touches_center[0]) / 5.
self.cam_rotation[0] -= (c[1] - self.touches_center[1]) / 5.
self.cam_translation[2] += min(.5, max(-.5, (d - self.touches_dist) / 10.))
self.touches_center = c
self.touches_dist = d
return True
def on_touch_up(self, touch):
if touch.grab_current is self:
touch.ungrab(self)
self.touches.remove(touch)
self.touches_center = self.get_center()
self.touches_dist = self.get_dist(self.touches_center)
else:
            return super(MultitouchCamera, self).on_touch_up(touch)
def get_center(self):
return (
sum(t.x for t in self.touches) / float(len(self.touches)),
sum(t.y for t in self.touches) / float(len(self.touches))
) if self.touches else self.center
def get_dist(self, center):
return (sum(
dist(t.pos, center)
for t in self.touches
) / float(len(self.touches))) if self.touches else 0
class MultitouchCenteredCamera(MultitouchCamera):
def setup_scene(self):
super(MultitouchCenteredCamera, self).setup_scene()
        self.cam_rot_x.origin = (0, 0, 0)
        self.cam_rot_y.origin = (0, 0, 0)
        self.cam_rot_z.origin = (0, 0, 0)
def update_cam(self, dt):
if not self.touches:
return
elif len(self.touches) == 1:
self.cam_rotation[1] += self.touches[0].dx / 5.
self.cam_rotation[0] -= self.touches[0].dy / 5.
else:
c = self.get_center()
d = self.get_dist(c)
self.obj_scale += (d - self.touches_dist) / 100.
self.touches_center = c
self.touches_dist = d
return True
class BaseView(ObjectRenderer):
time = NumericProperty(0)
light_radius = NumericProperty(20)
move_light = BooleanProperty(True)
def reset(self, *args):
Animation(
cam_rotation=(20, 0, 0),
cam_translation=(0, -3, -8),
t='in_out_quad',
d=2).start(self)
def update_lights(self, dt):
if not self.move_light:
return
self.time += dt
self.time %= 2 * pi
for i in range(self.nb_lights):
a = self.time + i * 2 * pi / self.nb_lights
self.light_sources[i] = [
cos(a) * self.light_radius, 5, sin(a) * self.light_radius, 1.0]
for k in self.light_sources.keys():
if k >= self.nb_lights:
del(self.light_sources[k])
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
Clock.unschedule(self.reset)
            return super(BaseView, self).on_touch_down(touch)
def on_touch_up(self, touch):
if len(self.touches) == 1 and touch in self.touches:
Clock.unschedule(self.reset)
Clock.schedule_once(self.reset, 3)
        return super(BaseView, self).on_touch_up(touch)
class View(MultitouchCamera, BaseView):
pass
class CenteredView(MultitouchCenteredCamera, BaseView):
min_scale = NumericProperty(0)
max_scale = NumericProperty(100)
def on_obj_scale(self, *args):
self.obj_scale = max(
self.min_scale, min(
self.max_scale,
self.obj_scale))
super(CenteredView, self).on_obj_scale(*args)
KV = '''
#:import listdir os.listdir
FloatLayout:
View:
canvas.before:
Color:
rgba: .6, .6, .6, 1
Rectangle:
pos: self.pos
size: self.size
id: rendering
scene: 'data/3d/%s' % scene.text
obj_scale: scale.value if scale.value > 0 else 1
display_all: True
ambiant: ambiant.value
diffuse: diffuse.value
specular: specular.value
on_parent: self.reset()
mode: modechoice.text
light_radius: light_radius.value
nb_lights: int(nb_lights.value)
move_light: move_light.active
GridLayout:
size_hint: 1, None
rows: 2
Slider:
min: 0
max: 10
id: scale
step: .1
value: rendering.obj_scale
Spinner:
id: scene
text: 'colorplane.obj'
values:
sorted([x for x in listdir('data/3d/')
if x.lower().endswith(('.3ds', '.obj'))])
Slider:
min: 0
max: 50
step: 1
value: 10
id: light_radius
Slider:
min: 0
max: 50
step: 1
value: 4
id: nb_lights
Spinner:
text: rendering.mode
values: 'triangles', 'points', 'lines'
id: modechoice
Slider:
id: ambiant
min: 0
max: 1
value: .01
step: .01
Slider:
id: diffuse
min: 0
max: 1
value: 1
step: .01
Slider:
id: specular
min: 0
max: 1
value: 1
step: .01
CheckBox:
id: move_light
active: True
Label:
size_hint_y: .1
text: 'scale %s' % scale.value
Label:
size_hint_y: .1
text: 'scene'
Label:
size_hint_y: .1
text: 'light_radius %s' % light_radius.value
Label:
size_hint_y: .1
text: 'nb_lights %s' % nb_lights.value
Label:
text: 'mode'
Label:
size_hint_y: .1
text: 'ambiant %s' % ambiant.value
Label:
size_hint_y: .1
text: 'diffuse %s' % diffuse.value
Label:
size_hint_y: .1
text: 'specular %s' % specular.value
Label:
text: 'move lights'
'''
if __name__ == '__main__':
from kivy.app import App
class App3D(App):
def build(self):
root = Builder.load_string(KV)
Clock.schedule_interval(root.ids.rendering.update_lights, 0)
return root
App3D().run()
| mit | -6,346,216,295,763,786,000 | 25.992933 | 87 | 0.514989 | false |
timeyyy/PyUpdater | pyupdater/utils/keydb.py | 1 | 4569 | # --------------------------------------------------------------------------
# Copyright 2014 Digital Sapphire Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
from __future__ import unicode_literals
import logging
import time
from pyupdater import settings
log = logging.getLogger(__name__)
class KeyDB(object):
"""Handles finding, sorting, getting meta-data, moving packages.
Kwargs:
data_dir (str): Path to directory containing key.db
load (bool):
Meaning:
True: Load db on initialization
False: Do not load db on initialization
"""
def __init__(self, db, load=False):
self.db = db
self.data = None
if load is True:
self.load()
def add_key(self, public, private, key_type='ed25519'):
"""Adds key pair to database
Args:
public (str): Public key
private (str): Private key
key_type (str): The type of key pair. Default ed25519
"""
_time = time.time()
if self.data is None:
self.load()
num = len(self.data) + 1
data = {
'date': _time,
'public': public,
'private': private,
'revoked': False,
'key_type': key_type,
}
log.info('Adding public key to db. {}'.format(len(public)))
self.data[num] = data
self.save()
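    # Illustrative usage sketch (added for clarity; not part of the original
    # class). ``db`` is assumed to be PyUpdater's storage object exposing
    # load()/save():
    #
    #     keydb = KeyDB(db, load=True)
    #     keydb.add_key(public='<public key>', private='<private key>')
    #     keydb.revoke_key()                  # retire the oldest active pair
    #     active_keys = keydb.get_public_keys()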
def get_public_keys(self):
"Returns a list of all valid public keys"
return self._get_keys('public')
def get_private_keys(self):
"Returns a list of all valid private keys"
return self._get_keys('private')
def _get_keys(self, key):
# Returns a list of non revoked keys
if self.data is None:
self.load()
order = []
keys = []
for k, v in self.data.items():
if v['revoked'] is False:
order.append(int(k))
else:
log.debug('Revoked key: {}'.format(k))
order = sorted(order)
for o in order:
try:
data = self.data[o]
log.debug('Got key data')
pub_key = data[key]
keys.append(pub_key)
log.debug('Got public key')
except KeyError: # pragma: no cover
log.debug('Key error')
continue
return keys
def get_revoked_key(self):
"Returns most recent revoked key pair"
if self.data is None:
self.load()
keys = []
for k, v in self.data.items():
if v['revoked'] is True:
keys.append(int(k))
if len(keys) >= 1:
key = sorted(keys)[-1]
info = self.data[key]
else:
info = None
return info
def revoke_key(self, count=1):
"""Revokes key pair
Args:
count (int): The number of keys to revoke. Oldest first
"""
if self.data is None:
self.load()
log.debug('Collecting keys')
keys = map(str, self.data.keys())
keys = sorted(keys)
c = 0
for k in keys:
if c >= count:
break
k = int(k)
if self.data.get(k) is None:
count += 1
continue
if self.data[k]['revoked'] is False:
self.data[k]['revoked'] = True
log.debug('Revoked key')
c += 1
self.save()
def load(self):
"Loads data from key.db"
self.data = self.db.load(settings.CONFIG_DB_KEY_KEYS)
if self.data is None:
log.info('Key.db file not found creating new')
self.data = dict()
def save(self):
"Saves data to key.db"
log.debug('Saving keys...')
self.db.save(settings.CONFIG_DB_KEY_KEYS, self.data)
log.debug('Saved keys...')
| bsd-2-clause | 7,798,155,614,872,261,000 | 27.917722 | 76 | 0.512585 | false |
osamak/student-portal | api/serializers.py | 2 | 4168 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import authenticate
from rest_framework import serializers, exceptions
from rest_framework.authtoken.serializers import AuthTokenSerializer
from activities.models import Activity, Episode
from activities.models import Category as ActivityCategory
from clubs.models import Club
from media.models import Buzz, BuzzView
from niqati.models import Code, Category, Collection, Order
class ModifiedAuthTokenSerializer(AuthTokenSerializer):
def validate(self, attrs):
"""Allow both usernames and emails."""
identification = attrs.get('username')
password = attrs.get('password')
if identification and password:
if '@' in identification:
username = identification.split('@')[0]
else:
username = identification
user = authenticate(username=username, password=password)
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise exceptions.ValidationError(msg)
# Currently, the app is only available for students.
try:
college = user.common_profile.college
except (ObjectDoesNotExist, AttributeError):
raise exceptions.ValidationError(u"لم تسجل في بوابة إنجاز كطالب!")
else:
msg = _('Unable to log in with provided credentials.')
raise exceptions.ValidationError(msg)
else:
msg = _('Must include "username" and "password".')
raise exceptions.ValidationError(msg)
attrs['user'] = user
return attrs
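# Usage sketch (standard DRF token-auth flow; the credential values are illustrative):
#
#   serializer = ModifiedAuthTokenSerializer(data={'username': '[email protected]',
#                                                  'password': 'secret'})
#   if serializer.is_valid():
#       user = serializer.validated_data['user']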
class EpisodeSerializer(serializers.ModelSerializer):
class Meta:
model = Episode
fields = ('pk', 'start_datetime', 'end_datetime', 'location')
class ClubSerializer(serializers.ModelSerializer):
class Meta:
model = Club
fields = ('pk', 'name', 'english_name', 'email', 'coordinator', 'gender', 'city')
class ActivityCategorySerializer(serializers.ModelSerializer):
class Meta:
model = ActivityCategory
fields = ('pk', 'ar_name')
class ActivitySerializer(serializers.ModelSerializer):
primary_club = ClubSerializer(read_only=True)
category = ActivityCategorySerializer(read_only=True)
secondary_clubs = ClubSerializer(many=True, read_only=True)
episode_set = EpisodeSerializer(many=True, read_only=True)
class Meta:
model = Activity
fields = ('pk', 'name', 'primary_club', 'secondary_clubs', 'public_description', 'episode_set', 'gender', 'category')
class BuzzSerializer(serializers.ModelSerializer):
class Meta:
model = Buzz
fields = ('pk', 'title', 'body', 'image', 'is_push')
class BuzzViewSerializer(serializers.ModelSerializer):
class Meta:
model = BuzzView
fields = ('pk', 'viewer', 'buzz', 'off_date')
class NiqatiActivitySerializer(serializers.ModelSerializer):
class Meta:
model = Activity
fields = ('name',)
class NiqatiEpisodeSerializer(serializers.ModelSerializer):
activity = NiqatiActivitySerializer(read_only=True)
class Meta:
model = Episode
fields = ('activity',)
class OrderSerializer(serializers.ModelSerializer):
episode = NiqatiEpisodeSerializer(read_only=True)
class Meta:
model = Order
fields = ('episode',)
class NiqatiCategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('ar_label',)
class CollectionSerializer(serializers.ModelSerializer):
order = OrderSerializer(read_only=True)
category = NiqatiCategorySerializer(read_only=True)
class Meta:
model = Collection
fields = ('category', 'order')
class CodeSerializer(serializers.ModelSerializer):
collection = CollectionSerializer(read_only=True)
class Meta:
model = Code
fields = ('pk', 'points', 'redeem_date', 'collection')
| agpl-3.0 | 2,935,201,702,626,434,600 | 35.043478 | 125 | 0.661037 | false |
p-l-/miasm | example/jitter/mips32.py | 6 | 2383 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from argparse import ArgumentParser
from miasm2.analysis import debugging, gdbserver
from miasm2.jitter.csts import *
from miasm2.analysis.machine import Machine
from pdb import pm
parser = ArgumentParser(
description="""Sandbox raw binary with mips32 engine
(ex: jit_mips32.py example/mips32_sc_l.bin 0)""")
parser.add_argument("-r", "--log-regs",
help="Log registers value for each instruction",
action="store_true")
parser.add_argument("-m", "--log-mn",
help="Log desassembly conversion for each instruction",
action="store_true")
parser.add_argument("-n", "--log-newbloc",
help="Log basic blocks processed by the Jitter",
action="store_true")
parser.add_argument("-j", "--jitter",
help="Jitter engine. Possible values are : tcc (default), llvm",
default="tcc")
parser.add_argument("-d", "--debugging",
help="Attach a CLI debugguer to the sandboxed programm",
action="store_true")
parser.add_argument("binary",
help="binary to run")
parser.add_argument("addr",
help="start exec on addr")
machine = Machine("mips32l")
def code_sentinelle(jitter):
jitter.run = False
jitter.pc = 0
return True
def jit_mips32_binary(args):
filepath, entryp = args.binary, int(args.addr, 16)
myjit = machine.jitter(jit_type = args.jitter)
myjit.init_stack()
# Log level (if available with jitter engine)
myjit.jit.log_regs = args.log_regs
myjit.jit.log_mn = args.log_mn
myjit.jit.log_newbloc = args.log_newbloc
myjit.vm.add_memory_page(0, PAGE_READ | PAGE_WRITE, open(filepath).read())
myjit.add_breakpoint(0x1337BEEF, code_sentinelle)
# for stack
myjit.vm.add_memory_page(0xF000, PAGE_READ | PAGE_WRITE, "\x00"*0x1000)
myjit.cpu.SP = 0xF800
myjit.cpu.RA = 0x1337BEEF
myjit.init_run(entryp)
# Handle debugging
if args.debugging is True:
dbg = debugging.Debugguer(myjit)
cmd = debugging.DebugCmd(dbg)
cmd.cmdloop()
else:
print(myjit.continue_run())
return myjit
if __name__ == '__main__':
from sys import stderr
args = parser.parse_args()
myjit = jit_mips32_binary(args)
| gpl-2.0 | -7,182,589,132,059,392,000 | 30.355263 | 84 | 0.619807 | false |
cyberark-bizdev/ansible | lib/ansible/modules/remote_management/hpilo/hpilo_boot.py | 56 | 6644 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hpilo_boot
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Boot system using specific media through HP iLO interface
description:
- "This module boots a system through its HP iLO interface. The boot media
can be one of: cdrom, floppy, hdd, network or usb."
- This module requires the hpilo python module.
options:
host:
description:
- The HP iLO hostname/address that is linked to the physical system.
required: true
login:
description:
- The login name to authenticate to the HP iLO interface.
default: Administrator
password:
description:
- The password to authenticate to the HP iLO interface.
default: admin
media:
description:
- The boot media to boot the system from
default: network
choices: [ "cdrom", "floppy", "hdd", "network", "normal", "usb" ]
image:
description:
- The URL of a cdrom, floppy or usb boot media image.
protocol://username:password@hostname:port/filename
- protocol is either 'http' or 'https'
- username:password is optional
- port is optional
state:
description:
- The state of the boot media.
- "no_boot: Do not boot from the device"
- "boot_once: Boot from the device once and then notthereafter"
- "boot_always: Boot from the device each time the serveris rebooted"
- "connect: Connect the virtual media device and set to boot_always"
- "disconnect: Disconnects the virtual media device and set to no_boot"
- "poweroff: Power off the server"
default: boot_once
choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
force:
description:
- Whether to force a reboot (even when the system is already booted).
- As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
default: no
choices: [ "yes", "no" ]
ssl_version:
description:
- Change the ssl_version used.
default: TLSv1
choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
version_added: '2.4'
requirements:
- hpilo
notes:
- To use a USB key image you need to specify floppy as boot media.
- This module ought to be run from a system that can access the HP iLO
interface directly, either by using C(local_action) or using C(delegate_to).
'''
EXAMPLES = r'''
- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
hpilo_boot:
host: YOUR_ILO_ADDRESS
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
media: cdrom
image: http://some-web-server/iso/boot.iso
when: cmdb_hwmodel.startswith('HP ')
delegate_to: localhost
- name: Power off a server
hpilo_boot:
host: YOUR_ILO_HOST
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
state: poweroff
delegate_to: localhost
'''
RETURN = '''
# Default return values
'''
import time
import warnings
try:
import hpilo
HAS_HPILO = True
except ImportError:
HAS_HPILO = False
from ansible.module_utils.basic import AnsibleModule
# Suppress warnings from hpilo
warnings.simplefilter('ignore')
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', required=True),
login=dict(type='str', default='Administrator'),
password=dict(type='str', default='admin', no_log=True),
media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
image=dict(type='str'),
state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
force=dict(type='bool', default=False),
ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
)
)
if not HAS_HPILO:
module.fail_json(msg='The hpilo python module is required')
host = module.params['host']
login = module.params['login']
password = module.params['password']
media = module.params['media']
image = module.params['image']
state = module.params['state']
force = module.params['force']
ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
changed = False
status = {}
power_status = 'UNKNOWN'
if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
# Workaround for: Error communicating with iLO: Problem manipulating EV
try:
ilo.set_one_time_boot(media)
except hpilo.IloError:
time.sleep(60)
ilo.set_one_time_boot(media)
# TODO: Verify if image URL exists/works
if image:
ilo.insert_virtual_media(media, image)
changed = True
if media == 'cdrom':
ilo.set_vm_status('cdrom', state, True)
status = ilo.get_vm_status()
changed = True
elif media in ('floppy', 'usb'):
ilo.set_vf_status(state, True)
status = ilo.get_vf_status()
changed = True
# Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
if state in ('boot_once', 'boot_always') or force:
power_status = ilo.get_host_power_status()
if not force and power_status == 'ON':
module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)
if power_status == 'ON':
ilo.warm_boot_server()
# ilo.cold_boot_server()
changed = True
else:
ilo.press_pwr_btn()
# ilo.reset_server()
# ilo.set_host_power(host_power=True)
changed = True
    elif state == 'poweroff':
power_status = ilo.get_host_power_status()
if not power_status == 'OFF':
ilo.hold_pwr_btn()
# ilo.set_host_power(host_power=False)
changed = True
module.exit_json(changed=changed, power=power_status, **status)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,933,881,236,778,294,000 | 30.942308 | 142 | 0.629892 | false |
landrito/api-client-staging | generated/python/gapic-google-cloud-monitoring-v3/google/cloud/gapic/monitoring/v3/metric_service_client.py | 7 | 30336 | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/monitoring/v3/metric_service.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.monitoring.v3 MetricService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.api import metric_pb2 as api_metric_pb2
from google.cloud.gapic.monitoring.v3 import enums
from google.cloud.proto.monitoring.v3 import common_pb2
from google.cloud.proto.monitoring.v3 import metric_pb2 as v3_metric_pb2
from google.cloud.proto.monitoring.v3 import metric_service_pb2
_PageDesc = google.gax.PageDescriptor
class MetricServiceClient(object):
"""
Manages metric descriptors, monitored resource descriptors, and
time series data.
"""
SERVICE_ADDRESS = 'monitoring.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
_PAGE_DESCRIPTORS = {
'list_monitored_resource_descriptors':
_PageDesc('page_token', 'next_page_token', 'resource_descriptors'),
'list_metric_descriptors': _PageDesc('page_token', 'next_page_token',
'metric_descriptors'),
'list_time_series': _PageDesc('page_token', 'next_page_token',
'time_series')
}
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/monitoring',
'https://www.googleapis.com/auth/monitoring.read',
'https://www.googleapis.com/auth/monitoring.write', )
_PROJECT_PATH_TEMPLATE = path_template.PathTemplate('projects/{project}')
_METRIC_DESCRIPTOR_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/metricDescriptors/{metric_descriptor=**}')
_MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}'
)
@classmethod
def project_path(cls, project):
"""Returns a fully-qualified project resource name string."""
return cls._PROJECT_PATH_TEMPLATE.render({'project': project, })
@classmethod
def metric_descriptor_path(cls, project, metric_descriptor):
"""Returns a fully-qualified metric_descriptor resource name string."""
return cls._METRIC_DESCRIPTOR_PATH_TEMPLATE.render({
'project': project,
'metric_descriptor': metric_descriptor,
})
@classmethod
def monitored_resource_descriptor_path(cls, project,
monitored_resource_descriptor):
"""Returns a fully-qualified monitored_resource_descriptor resource name string."""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.render({
'project': project,
'monitored_resource_descriptor': monitored_resource_descriptor,
})
@classmethod
def match_project_from_project_name(cls, project_name):
"""Parses the project from a project resource.
Args:
project_name (string): A fully-qualified path representing a project
resource.
Returns:
A string representing the project.
"""
return cls._PROJECT_PATH_TEMPLATE.match(project_name).get('project')
@classmethod
def match_project_from_metric_descriptor_name(cls, metric_descriptor_name):
"""Parses the project from a metric_descriptor resource.
Args:
metric_descriptor_name (string): A fully-qualified path representing a metric_descriptor
resource.
Returns:
A string representing the project.
"""
return cls._METRIC_DESCRIPTOR_PATH_TEMPLATE.match(
metric_descriptor_name).get('project')
@classmethod
def match_metric_descriptor_from_metric_descriptor_name(
cls, metric_descriptor_name):
"""Parses the metric_descriptor from a metric_descriptor resource.
Args:
metric_descriptor_name (string): A fully-qualified path representing a metric_descriptor
resource.
Returns:
A string representing the metric_descriptor.
"""
return cls._METRIC_DESCRIPTOR_PATH_TEMPLATE.match(
metric_descriptor_name).get('metric_descriptor')
@classmethod
def match_project_from_monitored_resource_descriptor_name(
cls, monitored_resource_descriptor_name):
"""Parses the project from a monitored_resource_descriptor resource.
Args:
monitored_resource_descriptor_name (string): A fully-qualified path representing a monitored_resource_descriptor
resource.
Returns:
A string representing the project.
"""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.match(
monitored_resource_descriptor_name).get('project')
@classmethod
def match_monitored_resource_descriptor_from_monitored_resource_descriptor_name(
cls, monitored_resource_descriptor_name):
"""Parses the monitored_resource_descriptor from a monitored_resource_descriptor resource.
Args:
monitored_resource_descriptor_name (string): A fully-qualified path representing a monitored_resource_descriptor
resource.
Returns:
A string representing the monitored_resource_descriptor.
"""
return cls._MONITORED_RESOURCE_DESCRIPTOR_PATH_TEMPLATE.match(
monitored_resource_descriptor_name).get(
'monitored_resource_descriptor')
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A MetricServiceClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'gapic-google-cloud-monitoring-v3', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'metric_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.monitoring.v3.MetricService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers,
page_descriptors=self._PAGE_DESCRIPTORS, )
self.metric_service_stub = config.create_stub(
metric_service_pb2.MetricServiceStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._list_monitored_resource_descriptors = api_callable.create_api_call(
self.metric_service_stub.ListMonitoredResourceDescriptors,
settings=defaults['list_monitored_resource_descriptors'])
self._get_monitored_resource_descriptor = api_callable.create_api_call(
self.metric_service_stub.GetMonitoredResourceDescriptor,
settings=defaults['get_monitored_resource_descriptor'])
self._list_metric_descriptors = api_callable.create_api_call(
self.metric_service_stub.ListMetricDescriptors,
settings=defaults['list_metric_descriptors'])
self._get_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.GetMetricDescriptor,
settings=defaults['get_metric_descriptor'])
self._create_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.CreateMetricDescriptor,
settings=defaults['create_metric_descriptor'])
self._delete_metric_descriptor = api_callable.create_api_call(
self.metric_service_stub.DeleteMetricDescriptor,
settings=defaults['delete_metric_descriptor'])
self._list_time_series = api_callable.create_api_call(
self.metric_service_stub.ListTimeSeries,
settings=defaults['list_time_series'])
self._create_time_series = api_callable.create_api_call(
self.metric_service_stub.CreateTimeSeries,
settings=defaults['create_time_series'])
# Service calls
def list_monitored_resource_descriptors(self,
name,
filter_='',
page_size=0,
options=None):
"""
Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in api.list_monitored_resource_descriptors(name):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_monitored_resource_descriptors(name, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
filter_ (string): An optional `filter <https://cloud.google.com/monitoring/api/v3/filters>`_ describing
the descriptors to be returned. The filter can reference
the descriptor's type and labels. For example, the
following filter returns only Google Compute Engine descriptors
that have an ``id`` label:
::
resource.type = starts_with(\"gce_\") AND resource.label:id
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.api.monitored_resource_pb2.MonitoredResourceDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_monitored_resource_descriptors(request, options)
def get_monitored_resource_descriptor(self, name, options=None):
"""
Gets a single monitored resource descriptor. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.monitored_resource_descriptor_path('[PROJECT]', '[MONITORED_RESOURCE_DESCRIPTOR]')
>>> response = api.get_monitored_resource_descriptor(name)
Args:
name (string): The monitored resource descriptor to get. The format is
``\"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\"``.
The ``{resource_type}`` is a predefined type, such as
``cloudsql_database``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.api.monitored_resource_pb2.MonitoredResourceDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
name=name)
return self._get_monitored_resource_descriptor(request, options)
def list_metric_descriptors(self,
name,
filter_='',
page_size=0,
options=None):
"""
Lists metric descriptors that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in api.list_metric_descriptors(name):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_metric_descriptors(name, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
filter_ (string): If this field is empty, all custom and
system-defined metric descriptors are returned.
Otherwise, the `filter <https://cloud.google.com/monitoring/api/v3/filters>`_
specifies which metric descriptors are to be
returned. For example, the following filter matches all
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_:
::
metric.type = starts_with(\"custom.googleapis.com/\")
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.api.metric_pb2.MetricDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.ListMetricDescriptorsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_metric_descriptors(request, options)
def get_metric_descriptor(self, name, options=None):
"""
Gets a single metric descriptor. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]')
>>> response = api.get_metric_descriptor(name)
Args:
name (string): The metric descriptor on which to execute the request. The format is
``\"projects/{project_id_or_number}/metricDescriptors/{metric_id}\"``.
An example value of ``{metric_id}`` is
``\"compute.googleapis.com/instance/disk/read_bytes_count\"``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.api.metric_pb2.MetricDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.GetMetricDescriptorRequest(name=name)
return self._get_metric_descriptor(request, options)
def create_metric_descriptor(self, name, metric_descriptor, options=None):
"""
Creates a new metric descriptor.
User-created metric descriptors define
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.api import metric_pb2 as api_metric_pb2
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>> metric_descriptor = api_metric_pb2.MetricDescriptor()
>>> response = api.create_metric_descriptor(name, metric_descriptor)
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
metric_descriptor (:class:`google.api.metric_pb2.MetricDescriptor`): The new `custom metric <https://cloud.google.com/monitoring/custom-metrics>`_
descriptor.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.api.metric_pb2.MetricDescriptor` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.CreateMetricDescriptorRequest(
name=name, metric_descriptor=metric_descriptor)
return self._create_metric_descriptor(request, options)
def delete_metric_descriptor(self, name, options=None):
"""
Deletes a metric descriptor. Only user-created
`custom metrics <https://cloud.google.com/monitoring/custom-metrics>`_ can be deleted.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]')
>>> api.delete_metric_descriptor(name)
Args:
name (string): The metric descriptor on which to execute the request. The format is
``\"projects/{project_id_or_number}/metricDescriptors/{metric_id}\"``.
An example of ``{metric_id}`` is:
``\"custom.googleapis.com/my_test_metric\"``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.DeleteMetricDescriptorRequest(name=name)
self._delete_metric_descriptor(request, options)
def list_time_series(self,
name,
filter_,
interval,
view,
aggregation=None,
order_by='',
page_size=0,
options=None):
"""
Lists time series that match a filter. This method does not require a Stackdriver account.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.cloud.gapic.monitoring.v3 import enums
>>> from google.cloud.proto.monitoring.v3 import common_pb2
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>> filter_ = ''
>>> interval = common_pb2.TimeInterval()
>>> view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
>>>
>>> # Iterate over all results
>>> for element in api.list_time_series(name, filter_, interval, view):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_time_series(name, filter_, interval, view, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The project on which to execute the request. The format is
\"projects/{project_id_or_number}\".
filter_ (string): A `monitoring filter <https://cloud.google.com/monitoring/api/v3/filters>`_ that specifies which time
series should be returned. The filter must specify a single metric type,
and can additionally specify metric labels and other information. For
example:
::
metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND
metric.label.instance_name = \"my-instance-name\"
interval (:class:`google.cloud.proto.monitoring.v3.common_pb2.TimeInterval`): The time interval for which results should be returned. Only time series
that contain data points in the specified interval are included
in the response.
aggregation (:class:`google.cloud.proto.monitoring.v3.common_pb2.Aggregation`): By default, the raw time series data is returned.
Use this field to combine multiple time series for different
views of the data.
order_by (string): Specifies the order in which the points of the time series should
be returned. By default, results are not ordered. Currently,
this field must be left blank.
view (enum :class:`google.cloud.gapic.monitoring.v3.enums.ListTimeSeriesRequest.TimeSeriesView`): Specifies which information is returned about the time series.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.cloud.proto.monitoring.v3.metric_pb2.TimeSeries` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
if aggregation is None:
aggregation = common_pb2.Aggregation()
# Create the request object.
request = metric_service_pb2.ListTimeSeriesRequest(
name=name,
filter=filter_,
interval=interval,
view=view,
aggregation=aggregation,
order_by=order_by,
page_size=page_size)
return self._list_time_series(request, options)
def create_time_series(self, name, time_series, options=None):
"""
Creates or adds data to one or more time series.
The response is empty if all time series in the request were written.
If any time series could not be written, a corresponding failure message is
included in the error response.
Example:
>>> from google.cloud.gapic.monitoring.v3 import metric_service_client
>>> from google.cloud.proto.monitoring.v3 import metric_pb2 as v3_metric_pb2
>>> api = metric_service_client.MetricServiceClient()
>>> name = api.project_path('[PROJECT]')
>>> time_series = []
>>> api.create_time_series(name, time_series)
Args:
name (string): The project on which to execute the request. The format is
``\"projects/{project_id_or_number}\"``.
time_series (list[:class:`google.cloud.proto.monitoring.v3.metric_pb2.TimeSeries`]): The new data to be added to a list of time series.
Adds at most one data point to each of several time series. The new data
point must be more recent than any other point in its time series. Each
``TimeSeries`` value must fully specify a unique time series by supplying
all label values for the metric and the monitored resource.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = metric_service_pb2.CreateTimeSeriesRequest(
name=name, time_series=time_series)
self._create_time_series(request, options)
| bsd-3-clause | 7,041,997,193,956,691,000 | 45.173516 | 170 | 0.630538 | false |
mdeemer/XlsxWriter | xlsxwriter/test/comparison/test_chartsheet09.py | 8 | 1773 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chartsheet09.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the worksheet properties of an XlsxWriter chartsheet filewith series format properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [49044480, 49055232]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'border': {'color': 'yellow'},
'fill': {'color': 'red'},
})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chartsheet.set_chart(chart)
chartsheet.activate()
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | 607,511,633,272,432,300 | 25.462687 | 106 | 0.545403 | false |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pip/_vendor/distlib/locators.py | 328 | 51013 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
return client.list_packages()
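# Usage sketch (requires network access to the index; a minimal example):
#
#   names = get_all_distribution_names()          # queries DEFAULT_INDEX
#   mirror = get_all_distribution_names('https://pypi.python.org/pypi')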
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None:
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
self.errors = queue.Queue()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
except self.errors.Empty:
continue
self.errors.task_done()
return result
def clear_errors(self):
"""
Clear any errors which may have been logged.
"""
# Just get the errors and throw them away
self.get_errors()
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
        # True sorts higher than False, so https URLs outrank http ones
        return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
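    # Example (illustrative): prefer_url('http://example.com/p-1.0.zip',
    #                                    'https://pypi.python.org/packages/p-1.0.tar.gz')
    # returns the second URL, matching the documented preference for https
    # PyPI-hosted archives.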
def split_filename(self, filename, project_name):
"""
Attempt to split a filename in project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
return normalize_name(name1) == normalize_name(name2)
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e: # pragma: no cover
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
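    # Example (illustrative): with project_name='flask',
    #   convert_url_to_download_info('https://example.com/Flask-1.0.tar.gz', 'flask')
    # should yield a dict with 'name', 'version', 'filename' and 'url' keys.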
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd:
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
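# Usage sketch (uses one of the concrete locators defined below; network access
# is required and the index URL is illustrative):
#
#   locator = SimpleScrapingLocator('https://pypi.python.org/simple/')
#   dist = locator.locate('requests (>= 2.0)')
#   if dist is not None:
#       print(dist.name, dist.version, dist.source_url)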
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
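# Usage sketch (XML-RPC interface; requires network access):
#
#   rpc = PyPIRPCLocator('https://pypi.python.org/pypi')
#   versions = rpc.get_project('coverage')   # version strings -> Distribution objects,
#                                            # plus 'urls' and 'digests' entries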
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
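# Example (illustrative): scraping a single anchor.
#
#   page = Page('<a href="pkg-1.0.tar.gz" rel="download">pkg</a>',
#               'https://example.com/simple/pkg/')
#   page.links  # [('https://example.com/simple/pkg/pkg-1.0.tar.gz', 'download')]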
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
:param num_workers: The number of worker threads you want to do I/O,
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
except Exception as e: # pragma: no cover
self.errors.put(text_type(e))
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
        Get the HTML for a URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
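# Editor's illustrative sketch (not part of distlib): typical use of the
# scraping locator against a PyPI-style "simple" index. The project name
# 'flask' and the timeout/worker values are hypothetical.
def _example_scraping_locator():
    locator = SimpleScrapingLocator('https://pypi.python.org/simple/',
                                    timeout=5.0, num_workers=4)
    # get_project() (from the Locator base class) drives _get_project() above
    # and returns a dict keyed by version, plus 'urls' and 'digests' entries.
    return locator.get_project('flask')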
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
                       is searched.
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
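# Editor's illustrative sketch (not part of distlib): pointing a
# DirectoryLocator at a local directory of downloaded archives. The path is
# hypothetical.
def _example_directory_locator():
    locator = DirectoryLocator('/srv/packages', recursive=False)
    return locator.get_distribution_names()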
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
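# Editor's illustrative sketch (not part of distlib): resolving a requirement
# and its dependency closure through the default locator. The requirement
# string is hypothetical.
def _example_dependency_finder():
    finder = DependencyFinder(default_locator)
    dists, problems = finder.find('requests (>= 2.0.0)', prereleases=True)
    return dists, problems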
| mit | 3,617,018,064,776,846,300 | 38.760717 | 95 | 0.524905 | false |
MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/pymysql/converters.py | 20 | 12487 | from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON, unichr
import datetime
from decimal import Decimal
import re
import time
from .constants import FIELD_TYPE, FLAG
from .charset import charset_by_id, charset_to_encoding
def escape_item(val, charset, mapping=None):
if mapping is None:
mapping = encoders
encoder = mapping.get(type(val))
# Fallback to default when no encoder found
if not encoder:
try:
encoder = mapping[text_type]
except KeyError:
raise TypeError("no default type converter defined")
if encoder in (escape_dict, escape_sequence):
val = encoder(val, charset, mapping)
else:
val = encoder(val, mapping)
return val
def escape_dict(val, charset, mapping=None):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset, mapping)
n[k] = quoted
return n
def escape_sequence(val, charset, mapping=None):
n = []
for item in val:
quoted = escape_item(item, charset, mapping)
n.append(quoted)
return "(" + ",".join(n) + ")"
def escape_set(val, charset, mapping=None):
return ','.join([escape_item(x, charset, mapping) for x in val])
def escape_bool(value, mapping=None):
return str(int(value))
def escape_object(value, mapping=None):
return str(value)
def escape_int(value, mapping=None):
return str(value)
def escape_float(value, mapping=None):
return ('%.15g' % value)
_escape_table = [unichr(x) for x in range(128)]
_escape_table[0] = u'\\0'
_escape_table[ord('\\')] = u'\\\\'
_escape_table[ord('\n')] = u'\\n'
_escape_table[ord('\r')] = u'\\r'
_escape_table[ord('\032')] = u'\\Z'
_escape_table[ord('"')] = u'\\"'
_escape_table[ord("'")] = u"\\'"
def _escape_unicode(value, mapping=None):
"""escapes *value* without adding quote.
Value should be unicode
"""
return value.translate(_escape_table)
if PY2:
def escape_string(value, mapping=None):
"""escape_string escapes *value* but not surround it with quotes.
Value should be bytes or unicode.
"""
if isinstance(value, unicode):
return _escape_unicode(value)
assert isinstance(value, (bytes, bytearray))
value = value.replace('\\', '\\\\')
value = value.replace('\0', '\\0')
value = value.replace('\n', '\\n')
value = value.replace('\r', '\\r')
value = value.replace('\032', '\\Z')
value = value.replace("'", "\\'")
value = value.replace('"', '\\"')
return value
def escape_bytes(value, mapping=None):
assert isinstance(value, (bytes, bytearray))
return b"_binary'%s'" % escape_string(value)
else:
escape_string = _escape_unicode
# On Python ~3.5, str.decode('ascii', 'surrogateescape') is slow.
# (fixed in Python 3.6, http://bugs.python.org/issue24870)
# Workaround is str.decode('latin1') then translate 0x80-0xff into 0udc80-0udcff.
# We can escape special chars and surrogateescape at once.
_escape_bytes_table = _escape_table + [chr(i) for i in range(0xdc80, 0xdd00)]
def escape_bytes(value, mapping=None):
return "_binary'%s'" % value.decode('latin1').translate(_escape_bytes_table)
def escape_unicode(value, mapping=None):
return u"'%s'" % _escape_unicode(value)
def escape_str(value, mapping=None):
return "'%s'" % escape_string(str(value), mapping)
def escape_None(value, mapping=None):
return 'NULL'
def escape_timedelta(obj, mapping=None):
seconds = int(obj.seconds) % 60
minutes = int(obj.seconds // 60) % 60
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
if obj.microseconds:
fmt = "'{0:02d}:{1:02d}:{2:02d}.{3:06d}'"
else:
fmt = "'{0:02d}:{1:02d}:{2:02d}'"
return fmt.format(hours, minutes, seconds, obj.microseconds)
def escape_time(obj, mapping=None):
if obj.microsecond:
fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'"
else:
fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}'"
return fmt.format(obj)
def escape_datetime(obj, mapping=None):
if obj.microsecond:
fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'"
else:
fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}'"
return fmt.format(obj)
def escape_date(obj, mapping=None):
fmt = "'{0.year:04}-{0.month:02}-{0.day:02}'"
return fmt.format(obj)
def escape_struct_time(obj, mapping=None):
return escape_datetime(datetime.datetime(*obj[:6]))
def _convert_second_fraction(s):
if not s:
return 0
# Pad zeros to ensure the fraction length in microseconds
s = s.ljust(6, '0')
return int(s[:6])
DATETIME_RE = re.compile(r"(\d{1,4})-(\d{1,2})-(\d{1,2})[T ](\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?")
def convert_datetime(obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> datetime_or_None('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> datetime_or_None('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> datetime_or_None('2007-02-31T23:06:20') is None
True
>>> datetime_or_None('0000-00-00 00:00:00') is None
True
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
m = DATETIME_RE.match(obj)
if not m:
return convert_date(obj)
try:
groups = list(m.groups())
groups[-1] = _convert_second_fraction(groups[-1])
return datetime.datetime(*[ int(x) for x in groups ])
except ValueError:
return convert_date(obj)
TIMEDELTA_RE = re.compile(r"(-)?(\d{1,3}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?")
def convert_timedelta(obj):
"""Returns a TIME column as a timedelta object:
>>> timedelta_or_None('25:06:17')
datetime.timedelta(1, 3977)
>>> timedelta_or_None('-25:06:17')
datetime.timedelta(-2, 83177)
Illegal values are returned as None:
>>> timedelta_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
m = TIMEDELTA_RE.match(obj)
if not m:
return None
try:
groups = list(m.groups())
groups[-1] = _convert_second_fraction(groups[-1])
negate = -1 if groups[0] else 1
hours, minutes, seconds, microseconds = groups[1:]
tdelta = datetime.timedelta(
hours = int(hours),
minutes = int(minutes),
seconds = int(seconds),
microseconds = int(microseconds)
) * negate
return tdelta
except ValueError:
return None
TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?")
def convert_time(obj):
"""Returns a TIME column as a time object:
>>> time_or_None('15:06:17')
datetime.time(15, 6, 17)
Illegal values are returned as None:
>>> time_or_None('-25:06:17') is None
True
>>> time_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
Also note that MySQL's TIME column corresponds more closely to
Python's timedelta and not time. However if you want TIME columns
to be treated as time-of-day and not a time offset, then you can
    set this function as the converter for FIELD_TYPE.TIME.
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
m = TIME_RE.match(obj)
if not m:
return None
try:
groups = list(m.groups())
groups[-1] = _convert_second_fraction(groups[-1])
hours, minutes, seconds, microseconds = groups
return datetime.time(hour=int(hours), minute=int(minutes),
second=int(seconds), microsecond=int(microseconds))
except ValueError:
return None
def convert_date(obj):
"""Returns a DATE column as a date object:
>>> date_or_None('2007-02-26')
datetime.date(2007, 2, 26)
Illegal values are returned as None:
>>> date_or_None('2007-02-31') is None
True
>>> date_or_None('0000-00-00') is None
True
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
try:
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
except ValueError:
return None
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if not PY2 and isinstance(timestamp, (bytes, bytearray)):
timestamp = timestamp.decode('ascii')
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
def convert_set(s):
if isinstance(s, (bytes, bytearray)):
return set(s.split(b","))
return set(s.split(","))
def through(x):
return x
#def convert_bit(b):
# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
# return struct.unpack(">Q", b)[0]
#
# the snippet above is right, but MySQLdb doesn't process bits,
# so we shouldn't either
convert_bit = through
def convert_characters(connection, field, data):
field_charset = charset_by_id(field.charsetnr).name
encoding = charset_to_encoding(field_charset)
if field.flags & FLAG.SET:
return convert_set(data.decode(encoding))
if field.flags & FLAG.BINARY:
return data
if connection.use_unicode:
data = data.decode(encoding)
elif connection.charset != field_charset:
data = data.decode(encoding)
data = data.encode(connection.encoding)
return data
encoders = {
bool: escape_bool,
int: escape_int,
long_type: escape_int,
float: escape_float,
str: escape_str,
text_type: escape_unicode,
tuple: escape_sequence,
list: escape_sequence,
set: escape_sequence,
frozenset: escape_sequence,
dict: escape_dict,
bytearray: escape_bytes,
type(None): escape_None,
datetime.date: escape_date,
datetime.datetime: escape_datetime,
datetime.timedelta: escape_timedelta,
datetime.time: escape_time,
time.struct_time: escape_struct_time,
Decimal: escape_object,
}
if not PY2 or JYTHON or IRONPYTHON:
encoders[bytes] = escape_bytes
decoders = {
FIELD_TYPE.BIT: convert_bit,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: int,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.LONGLONG: int,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
FIELD_TYPE.DATETIME: convert_datetime,
FIELD_TYPE.TIME: convert_timedelta,
FIELD_TYPE.DATE: convert_date,
FIELD_TYPE.SET: convert_set,
FIELD_TYPE.BLOB: through,
FIELD_TYPE.TINY_BLOB: through,
FIELD_TYPE.MEDIUM_BLOB: through,
FIELD_TYPE.LONG_BLOB: through,
FIELD_TYPE.STRING: through,
FIELD_TYPE.VAR_STRING: through,
FIELD_TYPE.VARCHAR: through,
FIELD_TYPE.DECIMAL: Decimal,
FIELD_TYPE.NEWDECIMAL: Decimal,
}
# for MySQLdb compatibility
conversions = encoders.copy()
conversions.update(decoders)
Thing2Literal = escape_str
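# Editor's illustrative sketch (not part of PyMySQL): exercising the encoder
# mapping above directly; the expected SQL literals are shown in comments.
def _example_escape():
    date_literal = escape_item(datetime.date(2007, 2, 25), 'utf8')  # "'2007-02-25'"
    null_literal = escape_item(None, 'utf8')                        # 'NULL'
    tuple_literal = escape_item((1, 2, 3), 'utf8')                  # '(1,2,3)'
    return date_literal, null_literal, tuple_literal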
| mit | 2,490,726,471,220,942,000 | 28.801909 | 112 | 0.618563 | false |
rayNymous/nupic | src/nupic/regions/ImageSensorExplorers/Jiggle.py | 17 | 5887 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.regions.ImageSensorExplorers.SpiralSweep import SpiralSweep
DEBUG = 0
class Jiggle(SpiralSweep):
"""
This explorer jiggles the image around each possible location within a specified
radius, being careful not to repeat movements already performed between any
two locations for maximum efficiency.
This explorer can be an efficient means of training a first order temporal
learner for translation shifts within a certain radius from center.
"""
def __init__(self, shift=1, radius=1, *args, **kwargs):
"""
@param shift -- Number of pixels to shift each time.
@param radius -- maximum amount to move away from center. Must be a multiple
of 'shift'
"""
assert(radius >= 1)
SpiralSweep.__init__(self, radius, *args, **kwargs)
if (radius % shift) != 0:
raise RuntimeError("radius must be a multiple of shift")
# Generate the location offsets that we will move to for each image
self.offsets = self._generateOffsets(shift, radius)
self.index = 0
def _generateOffsets(self, shift, radius):
"""
Generate the list of offsets we will visit for each image/filter combination
@param shift - how much each location is separated by
@param radius - maximum radius away from center to move
"""
# Table mapping a jiggle index to the relative location
gJiggleOffsets = [ \
# direction jiggleIndex's
( 0, shift), # up (1,2)
(-shift, shift), # up-right (3,4)
(-shift, 0), # right (5,6)
(-shift,-shift), # down-right (7,8)
( 0, -shift), # down (9,10)
( shift,-shift), # down-left (11,12)
( shift, 0), # left (13,14)
( shift, shift), # up-left (15,16)
]
gJigglesPerformed = []
# ------------------------------------------------------------------------
# Find the next "jiggle" for the current offset
def nextJiggle(location, jiggleIndex, jiggleOffsets, jigglesPerformed):
"""
Find the next jiggle around the current location
@param location - current location
@param jiggleIndex - current jiggle index
@param jiggleOffsets - table of jiggle offsets for each location
@param jigglesPerformed - which jiggles we've performed already between
all possible locations
@retval (jiggleIndex, jiggleTo)
or None if we've already visited all neighbors from this location
"""
#global jigglesPerformed, jiggleOffsets
while True:
jiggleIndex += 1
if jiggleIndex > 16:
return (None, None)
src = tuple(location)
dst = (src[0] + jiggleOffsets[(jiggleIndex-1)/2][0],
src[1] + jiggleOffsets[(jiggleIndex-1)/2][1])
# If the dst is outside our radius, skip it
if max(abs(dst[0]), abs(dst[1])) > radius:
continue
# Going away or coming back?
if (jiggleIndex & 1) == 0:
(src, dst) = (dst, src)
# If we've already peformed this transition between src and dst, skip it
if (jiggleIndex & 1):
awayJiggle = (src, dst)
backJiggle = (dst, src)
if awayJiggle in jigglesPerformed and \
backJiggle in jigglesPerformed:
if DEBUG >= 2: print "already performed jiggle", jiggleIndex, ", skipping"
jiggleIndex += 1
continue
# Add these jiggles to those performed
jigglesPerformed += [awayJiggle, backJiggle]
# Move to dst
if DEBUG >= 2:
print "jiggleIndex:", jiggleIndex, "location:", location,
print "relPosition:", dst
return (jiggleIndex, dst)
# --------------------------------------------------------------------------
# Loop through each loation within the radius and find all the jiggles
# Locations are encoded (x, y) and higher values move towards the top-left
location = [radius, radius] # top-left corner
offsets = [tuple(location)]
while True:
jiggleIndex = 0
# ...............................
# Next jiggle at this location
while True:
(jiggleIndex, offset) = nextJiggle(location, jiggleIndex, gJiggleOffsets,
gJigglesPerformed)
if offset is None:
break
offsets += [offset]
# ...............................
# Next location
# Next location to the right
location[0] -= shift
if location[0] >= -radius:
offsets += [tuple(location)]
continue
# Next row down, on the left
location[0] = radius
location[1] -= shift
if location[1] >= -radius:
offsets += [tuple(location)]
continue
# Exhausted all locations, break out
break
return offsets
| agpl-3.0 | -7,993,967,387,821,285,000 | 34.251497 | 87 | 0.583319 | false |
peterheim1/robbie | bin/follow_head.py | 1 | 5817 | #!/usr/bin/env python
# freely inspired by http://www.ros.org/wiki/arbotix_python/follow_controller
import roslib
import rospy, actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from dynamixel_msgs.msg import JointState as JointStateDynamixel
from std_msgs.msg import Float64
class JointSubscriber():
def __init__(self,joint):
rospy.Subscriber(joint + '/state', JointStateDynamixel, self.joint_state_cb)
rospy.loginfo('Subscribing for %s joint state.',joint)
self.joint_name = joint
self.state = JointStateDynamixel()
self.received = False
def joint_state_cb(self,msg):
if self.received is False:
self.received = True
self.state = msg
def get_position(self):
return self.state.current_pos
class JointCommander():
def __init__(self,joint):
self.joint_name = joint
self.pub = rospy.Publisher(joint + '/command',Float64)
def command(self,pos):
rospy.loginfo('publishing, joint ' + self.joint_name + ', value ' + str(pos))
self.pub.publish(pos)
class FollowController():
def __init__(self):
self.ns = 'head_controller'
self.joints = ['head_pan_joint', 'head_tilt_mod_joint']
namespace = rospy.get_namespace()
        rospy.loginfo('Configured for ' + str(len(self.joints)) + ' joints')
self.joint_subs = [JointSubscriber(name) for name in self.joints]
self.joint_pubs = [JointCommander(name) for name in self.joints]
self.joints_names = []
for idx in range(0,len(self.joints)):
self.joints_names.append(self.joints[idx])
# action server
self.name = self.ns + '/head_trajectory'
self.server = actionlib.SimpleActionServer(self.name, FollowJointTrajectoryAction, execute_cb=self.actionCb, auto_start=False)
rospy.loginfo("Started head_Controller ("+self.name+"). Joints: " + str(self.joints))
def startup(self):
self.server.start()
def actionCb(self, goal):
rospy.loginfo(self.name + ": Action goal recieved.")
traj = goal.trajectory
if set(self.joints_names) != set(traj.joint_names):
msg = "Trajectory joint names does not match action controlled joints." + str(traj.joint_names )
rospy.logerr(msg)
self.server.set_aborted(text=msg)
return
if not traj.points:
msg = "Trajectory empty."
rospy.logerr(msg)
self.server.set_aborted(text=msg)
return
try:
indexes = [traj.joint_names.index(joint) for joint in self.joints_names]
except ValueError as val:
msg = "Trajectory invalid."
rospy.logerr(msg)
self.server.set_aborted(text=msg)
return
if self.executeTrajectory(traj):
self.server.set_succeeded()
rospy.loginfo('Executed.')
else:
rospy.logerr('Execution failed.')
self.server.set_aborted(text="Execution failed.")
def executeTrajectory(self, traj):
rospy.loginfo("Executing trajectory with " + str(len(traj.points)) + ' point(s)')
try:
indexes = [traj.joint_names.index(joint) for joint in self.joints_names]
except ValueError as val:
return False
time = rospy.Time.now()
start = traj.header.stamp
#success = True
for point in traj.points:
if self.server.is_preempt_requested():
rospy.loginfo('Stopping head control')
self.server.set_preempted()
#success = False
break
while rospy.Time.now() + rospy.Duration(0.01) < start:
rospy.sleep(0.01)
desired = [ point.positions[k] for k in indexes ]
endtime = start + point.time_from_start
for i in range(0,len(self.joints)):
self.joint_pubs[i].command(desired[i])
while rospy.Time.now() + rospy.Duration(0.01) < endtime:
rospy.sleep(0.01)
return True
#===============================================================================
# def active(self):
# """ Is controller overriding servo internal control? """
# return self.server.is_active() or self.executing
#
# def getDiagnostics(self):
# """ Get a diagnostics status. """
# msg = DiagnosticStatus()
# msg.name = self.name
# msg.level = DiagnosticStatus.OK
# msg.message = "OK"
# if self.active():
# msg.values.append(KeyValue("State", "Active"))
# else:
# msg.values.append(KeyValue("State", "Not Active"))
# return msg
#===============================================================================
if __name__ == '__main__':
rospy.init_node('head_traj_controller', anonymous=True)
rospy.loginfo('head traj action node.')
c = FollowController()
rospy.loginfo('Starting action server')
c.startup()
rospy.loginfo('Spinning')
rospy.spin()
| gpl-3.0 | -3,832,184,821,617,121,300 | 29.615789 | 134 | 0.509025 | false |
hospace/ToughRADIUS | toughradius/radiusd/plugins/auth_rate_limit.py | 5 | 2192 | #!/usr/bin/env python
#coding=utf-8
from twisted.python import log
from toughradius.radiusd.settings import *
import datetime
def std_rate(resp,_in,_out):
input_limit = str(_in)
output_limit = str(_out)
_class = input_limit.zfill(8) + input_limit.zfill(8) + output_limit.zfill(8) + output_limit.zfill(8)
resp['Class'] = _class
return resp
def ros_rate(resp,_in,_out):
_irate = _in/1024
_orate = _out/1024
resp['Mikrotik-Rate-Limit'] = '%sk/%sk'%(_irate,_orate)
return resp
def aikuai_rate(resp,_in,_out):
_irate = _in/1024/8
_orate = _out/1024/8
resp['RP-Upstream-Speed-Limit'] = _irate
resp['RP-Downstream-Speed-Limit'] = _orate
return resp
def cisco_rate(resp,_in,_out):
return resp
def radback_rate(resp,_in,_out):
return resp
def h3c_rate(resp,_in,_out):
resp = std_rate(resp, _in, _out)
resp['H3C-Input-Average-Rate'] = _in
resp['H3C-Input-Peak-Rate'] = _in
resp['H3C-Output-Average-Rate'] = _out
resp['H3C-Output-Peak-Rate'] = _out
return resp
def zte_rate(resp,_in,_out):
resp['ZTE-Rate-Ctrl-Scr-Up'] = _in/1024
resp['ZTE-Rate-Ctrl-Scr-Down'] = _out/1024
return resp
def huawei_rate(resp,_in,_out):
resp = std_rate(resp,_in,_out)
resp['Huawei-Input-Average-Rate'] = _in
resp['Huawei-Input-Peak-Rate'] = _in
resp['Huawei-Output-Average-Rate'] = _out
resp['Huawei-Output-Peak-Rate'] = _out
return resp
rate_funcs = {
'0' : std_rate,
'9' : cisco_rate,
'2011' : huawei_rate,
'2352' : radback_rate,
'3902' : zte_rate,
'25506' : h3c_rate,
'14988' : ros_rate,
'10055' : aikuai_rate
}
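# Editor's illustrative sketch (not part of ToughRADIUS): calling one of the
# vendor rate functions directly. The 4M/8M figures are hypothetical and
# assumed to be bits per second, matching the product attributes in process().
def _example_rate():
    return ros_rate({}, 4194304, 8388608)  # {'Mikrotik-Rate-Limit': '4096k/8192k'}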
def process(req=None,resp=None,user=None,radiusd=None,**kwargs):
store = radiusd.store
if store.is_white_roster(req.get_mac_addr()):
return resp
product = store.get_product(user['product_id'])
if not product:return resp
input_limit = product['input_max_limit']
output_limit = product['output_max_limit']
if input_limit == 0 and output_limit == 0:
return std_rate(resp,0,0)
_vendor = req.vendor_id or '0'
return rate_funcs[_vendor](resp,input_limit,output_limit)
| agpl-3.0 | 3,105,824,620,084,847,000 | 25.409639 | 104 | 0.618613 | false |
TeamExodus/external_chromium_org | tools/memory_inspector/memory_inspector/classification/native_heap_classifier.py | 89 | 8377 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module classifies NativeHeap objects filtering their allocations.
The only filter currently available is 'stacktrace', which works as follows:
{'name': 'rule-1', 'stacktrace': 'foo' }
{'name': 'rule-2', 'stacktrace': ['foo', r'bar\s+baz']}
{'name': 'rule-3', 'source_path': 'sk.*allocator'}
{'name': 'rule-4', 'source_path': 'sk', 'stacktrace': 'SkAllocator'}
rule-1 will match any allocation that has 'foo' in one of its stack frames.
rule-2 will match any allocation that has a stack frame matching 'foo',
followed later in the stack trace by a frame matching 'bar baz'. Note that
order matters, so rule-2
will not match a stacktrace like ['bar baz', 'foo'].
rule-3 will match any allocation in which at least one of the source paths in
its stack frames matches the regex sk.*allocator.
rule-4 will match any allocation which satisfies both the conditions.
TODO(primiano): introduce more filters after the first prototype with UI, for
instance, filter by library file name or by allocation size.
"""
import collections
import posixpath
import re
from memory_inspector.classification import results
from memory_inspector.classification import rules
from memory_inspector.core import exceptions
from memory_inspector.core import native_heap
_RESULT_KEYS = ['bytes_allocated', 'bytes_resident']
def LoadRules(content):
"""Loads and parses a native-heap rule tree from a content (string).
Returns:
An instance of |rules.Rule|, which nodes are |_NHeapRule| instances.
"""
return rules.Load(content, _NHeapRule)
def Classify(nativeheap, rule_tree):
"""Creates aggregated results of native heaps using the provided rules.
Args:
nativeheap: the heap dump being processed (a |NativeHeap| instance).
rule_tree: the user-defined rules that define the filtering categories.
Returns:
An instance of |AggreatedResults|.
"""
assert(isinstance(nativeheap, native_heap.NativeHeap))
assert(isinstance(rule_tree, rules.Rule))
res = results.AggreatedResults(rule_tree, _RESULT_KEYS)
for allocation in nativeheap.allocations:
res.AddToMatchingNodes(allocation,
[allocation.size, allocation.resident_size])
return res
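# Editor's illustrative sketch (not part of the original module): the rule
# format from the module docstring, fed through LoadRules and Classify. The
# rule names and regexes are hypothetical; |nheap| is assumed to be a
# symbolized NativeHeap instance.
def _ExampleClassification(nheap):
  rule_tree = LoadRules(str([
      {'name': 'skia', 'source_path': r'sk.*allocator'},
      {'name': 'v8', 'stacktrace': ['v8::', r'Heap\w+']},
  ]))
  return Classify(nheap, rule_tree)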
def InferHeuristicRulesFromHeap(nheap, max_depth=3, threshold=0.02):
"""Infers the rules tree from a symbolized heap snapshot.
In lack of a specific set of rules, this method can be invoked to infer a
meaningful rule tree starting from a heap snapshot. It will build a compact
radix tree from the source paths of the stack traces, which height is at most
|max_depth|, selecting only those nodes which contribute for at least
|threshold| (1.0 = 100%) w.r.t. the total allocation of the heap snapshot.
"""
assert(isinstance(nheap, native_heap.NativeHeap))
def RadixTreeInsert(node, path):
"""Inserts a string (path) into a radix tree (a python recursive dict).
e.g.: [/a/b/c, /a/b/d, /z/h] -> {'/a/b/': {'c': {}, 'd': {}}, '/z/h': {}}
"""
def GetCommonPrefix(args):
"""Returns the common prefix between two paths (no partial paths).
e.g.: /tmp/bar, /tmp/baz will return /tmp/ (and not /tmp/ba as the dumb
posixpath.commonprefix implementation would do)
"""
parts = posixpath.commonprefix(args).rpartition(posixpath.sep)[0]
return parts + posixpath.sep if parts else ''
for node_path in node.iterkeys():
pfx = GetCommonPrefix([node_path, path])
if not pfx:
continue
if len(pfx) < len(node_path):
node[pfx] = {node_path[len(pfx):] : node[node_path]}
del node[node_path]
if len(path) > len(pfx):
RadixTreeInsert(node[pfx], path[len(pfx):])
return
node[path] = {} # No common prefix, create new child in current node.
# Given an allocation of size N and its stack trace, heuristically determines
# the source directory to be blamed for the N bytes.
  # The blamed_dir is the one which appears most often in the top 8 stack frames
# (excluding the first 2, which usually are just the (m|c)alloc call sites).
# At the end, this will generate a *leaderboard* (|blamed_dirs|) which
# associates, to each source path directory, the number of bytes allocated.
blamed_dirs = collections.Counter() # '/s/path' : bytes_from_this_path (int)
total_allocated = 0
for alloc in nheap.allocations:
dir_histogram = collections.Counter()
for frame in alloc.stack_trace.frames[2:10]:
# Compute a histogram (for each allocation) of the top source dirs.
if not frame.symbol or not frame.symbol.source_info:
continue
src_file = frame.symbol.source_info[0].source_file_path
src_dir = posixpath.dirname(src_file.replace('\\', '/')) + '/'
dir_histogram.update([src_dir])
if not dir_histogram:
continue
# Add the blamed dir to the leaderboard.
blamed_dir = dir_histogram.most_common()[0][0]
blamed_dirs.update({blamed_dir : alloc.size})
total_allocated += alloc.size
# Select only the top paths from the leaderboard which contribute for more
# than |threshold| and make a radix tree out of them.
radix_tree = {}
for blamed_dir, alloc_size in blamed_dirs.most_common():
if (1.0 * alloc_size / total_allocated) < threshold:
break
RadixTreeInsert(radix_tree, blamed_dir)
# The final step consists in generating a rule tree from the radix tree. This
# is a pretty straightforward tree-clone operation, they have the same shape.
def GenRulesFromRadixTree(radix_tree_node, max_depth, parent_path=''):
children = []
if max_depth > 0:
for node_path, node_children in radix_tree_node.iteritems():
child_rule = {
'name': node_path[-16:],
'source_path': '^' + re.escape(parent_path + node_path),
'children': GenRulesFromRadixTree(
node_children, max_depth - 1, parent_path + node_path)}
children += [child_rule]
return children
rules_tree = GenRulesFromRadixTree(radix_tree, max_depth)
return LoadRules(str(rules_tree))
class _NHeapRule(rules.Rule):
def __init__(self, name, filters):
super(_NHeapRule, self).__init__(name)
# The 'stacktrace' filter can be either a string (simple case, one regex) or
# a list of strings (complex case, see doc in the header of this file).
stacktrace_regexs = filters.get('stacktrace', [])
if isinstance(stacktrace_regexs, basestring):
stacktrace_regexs = [stacktrace_regexs]
self._stacktrace_regexs = []
for regex in stacktrace_regexs:
try:
self._stacktrace_regexs.append(re.compile(regex))
except re.error, descr:
raise exceptions.MemoryInspectorException(
'Stacktrace regex error "%s" : %s' % (regex, descr))
# The 'source_path' regex, instead, simply matches the source file path.
self._path_regex = None
path_regex = filters.get('source_path')
if path_regex:
try:
self._path_regex = re.compile(path_regex)
except re.error, descr:
raise exceptions.MemoryInspectorException(
'Path regex error "%s" : %s' % (path_regex, descr))
def Match(self, allocation):
# Match the source file path, if the 'source_path' filter is specified.
if self._path_regex:
path_matches = False
for frame in allocation.stack_trace.frames:
if frame.symbol and frame.symbol.source_info:
if self._path_regex.search(
frame.symbol.source_info[0].source_file_path):
path_matches = True
break
if not path_matches:
return False
# Match the stack traces symbols, if the 'stacktrace' filter is specified.
if not self._stacktrace_regexs:
return True
cur_regex_idx = 0
cur_regex = self._stacktrace_regexs[0]
for frame in allocation.stack_trace.frames:
if frame.symbol and cur_regex.search(frame.symbol.name):
# The current regex has been matched.
if cur_regex_idx == len(self._stacktrace_regexs) - 1:
return True # All the provided regexs have been matched, we're happy.
cur_regex_idx += 1
cur_regex = self._stacktrace_regexs[cur_regex_idx]
return False # Not all the provided regexs have been matched. | bsd-3-clause | 8,135,106,956,259,978,000 | 38.706161 | 80 | 0.681867 | false |
cloudwatt/contrail-controller | src/opserver/stats.py | 7 | 5232 | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# stats
#
# Query StatsOracle info from analytics
#
import sys
import argparse
import json
import datetime
from opserver_util import OpServerUtils
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, NodeTypeNames
import sandesh.viz.constants as VizConstants
from pysandesh.gen_py.sandesh.ttypes import SandeshType, SandeshLevel
STAT_TABLE_LIST = [xx.stat_type + "." + xx.stat_attr for xx in VizConstants._STAT_TABLES]
class StatQuerier(object):
def __init__(self):
self._args = None
# end __init__
# Public functions
def parse_args(self):
"""
Eg. python stats.py --analytics-api-ip 127.0.0.1
--analytics-api-port 8081
--table AnalyticsCpuState.cpu_info
--where name=a6s40 cpu_info.module_id=Collector
--select "T=60 SUM(cpu_info.cpu_share)"
--sort "SUM(cpu_info.cpu_share)"
[--start-time now-10m --end-time now] | --last 10m
python stats.py --table AnalyticsCpuState.cpu_info
"""
defaults = {
'analytics_api_ip': '127.0.0.1',
'analytics_api_port': '8081',
'start_time': 'now-10m',
'end_time': 'now',
'select' : [],
'where' : [],
'sort': []
}
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.set_defaults(**defaults)
parser.add_argument("--analytics-api-ip", help="IP address of Analytics API Server")
parser.add_argument("--analytics-api-port", help="Port of Analytcis API Server")
parser.add_argument(
"--start-time", help="Logs start time (format now-10m, now-1h)")
parser.add_argument("--end-time", help="Logs end time")
parser.add_argument(
"--last", help="Logs from last time period (format 10m, 1d)")
parser.add_argument(
"--table", help="StatTable to query", choices=STAT_TABLE_LIST)
parser.add_argument(
"--dtable", help="Dynamic StatTable to query")
parser.add_argument(
"--select", help="List of Select Terms", nargs='+')
parser.add_argument(
"--where", help="List of Where Terms to be ANDed", nargs='+')
parser.add_argument(
"--sort", help="List of Sort Terms", nargs='+')
self._args = parser.parse_args()
if self._args.table is None and self._args.dtable is None:
return -1
try:
self._start_time, self._end_time = \
OpServerUtils.parse_start_end_time(
start_time = self._args.start_time,
end_time = self._args.end_time,
last = self._args.last)
except:
return -1
return 0
# end parse_args
# Public functions
def query(self):
query_url = OpServerUtils.opserver_query_url(
self._args.analytics_api_ip,
self._args.analytics_api_port)
if self._args.dtable is not None:
rtable = self._args.dtable
else:
rtable = self._args.table
query_dict = OpServerUtils.get_query_dict(
"StatTable." + rtable, str(self._start_time), str(self._end_time),
select_fields = self._args.select,
where_clause = "AND".join(self._args.where),
sort_fields = self._args.sort)
print json.dumps(query_dict)
resp = OpServerUtils.post_url_http(
query_url, json.dumps(query_dict), sync = True)
res = None
if resp is not None:
res = json.loads(resp)
res = res['value']
return res
# end query
def display(self, result):
if result == [] or result is None:
return
for res in result:
print res
# end display
# end class StatQuerier
def main():
querier = StatQuerier()
if querier.parse_args() != 0:
return
if len(querier._args.select)==0 and querier._args.dtable is None:
tab_url = "http://" + querier._args.analytics_api_ip + ":" +\
querier._args.analytics_api_port +\
"/analytics/table/StatTable." + querier._args.table
schematxt = OpServerUtils.get_url_http(tab_url + "/schema")
schema = json.loads(schematxt.text)['columns']
for pp in schema:
if pp.has_key('suffixes') and pp['suffixes']:
des = "%s %s" % (pp['name'],str(pp['suffixes']))
else:
des = "%s" % pp['name']
if pp['index']:
valuetxt = OpServerUtils.get_url_http(tab_url + "/column-values/" + pp['name'])
print "%s : %s %s" % (des,pp['datatype'], valuetxt.text)
else:
print "%s : %s" % (des,pp['datatype'])
else:
result = querier.query()
querier.display(result)
# end main
if __name__ == "__main__":
main()
| apache-2.0 | -2,767,403,548,630,975,500 | 31.90566 | 95 | 0.546827 | false |
JinfengChen/CIRCfinder | script/extract_anchor.py | 1 | 1494 | #!/usr/bin/env python
import sys
import pysam
def main():
if len(sys.argv) != 5:
print('extract_anchor.py sam fa i length')
sys.exit(0)
i = sys.argv[3]
length = int(sys.argv[4]) + 10
flag = ''
fa = pysam.Fastafile(sys.argv[2])
with open(sys.argv[1], 'r') as sam:
for line in sam:
if not line.startswith('@'):
label = line.split()[0] # intron label
if flag != label:
flag = label
read_start = open('read/' + label + '_read_start_' + i + '.fa', 'w')
read_end = open('read/' + label + '_read_end_' + i + '.fa', 'w')
pos = int(line.split()[3])
idx = pos / length * length
cigar = line.split()[1]
if cigar == '0':
offset = pos % length
else:
offset = pos % length + 10
seq = fa.fetch('index', idx, idx + length)
head = '>' + label + '_' + str(idx) + '_' + str(offset)
head += '_' + cigar + '\n'
if cigar == '0':
read_start.write(head + seq[:(offset - 1)] + '\n')
read_end.write(head + seq[(offset - 1):-10] + '\n')
else:
read_end.write(head + seq[:(offset - 1)] + '\n')
read_start.write(head + seq[(offset - 1):-10] + '\n')
if __name__ == '__main__':
main()
| mit | -3,681,476,219,729,401,000 | 35.439024 | 88 | 0.409639 | false |
Nirvedh/CoarseCoherence | src/mem/slicc/symbols/Event.py | 92 | 1754 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.symbols.Symbol import Symbol
class Event(Symbol):
def __repr__(self):
return "[Event: %s]" % self.ident
__all__ = [ "Event" ]
| bsd-3-clause | 8,345,566,959,039,847,000 | 50.588235 | 72 | 0.777081 | false |
Phrozyn/MozDef | alerts/auditd_sftp.py | 2 | 1625 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch, PhraseMatch
class AlertSFTPEvent(AlertTask):
def main(self):
search_query = SearchQuery(minutes=5)
search_query.add_must([
TermMatch('category', 'execve'),
TermMatch('processname', 'audisp-json'),
TermMatch('details.processname', 'ssh'),
PhraseMatch('details.parentprocess', 'sftp')
])
self.filtersManual(search_query)
self.searchEventsSimple()
self.walkEvents()
# Set alert properties
def onEvent(self, event):
category = 'execve'
severity = 'NOTICE'
        tags = ['audisp-json', 'audit']
srchost = 'unknown'
username = 'unknown'
directory = 'unknown'
x = event['_source']
if 'hostname' in x:
srchost = x['hostname']
if 'details' in x:
if 'originaluser' in x['details']:
username = x['details']['originaluser']
if 'cwd' in x['details']:
directory = x['details']['cwd']
summary = 'SFTP Event by {0} from host {1} in directory {2}'.format(username, srchost, directory)
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, [event], severity)
| mpl-2.0 | -5,377,241,139,030,886,000 | 32.854167 | 105 | 0.610462 | false |
auferack08/edx-platform | common/lib/xmodule/xmodule/tests/test_editing_module.py | 27 | 2425 | """ Tests for editing descriptors"""
import unittest
import os
import logging
from mock import Mock
from pkg_resources import resource_string
from opaque_keys.edx.locations import Location
from xmodule.editing_module import TabsEditingDescriptor
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
log = logging.getLogger(__name__)
class TabsEditingDescriptorTestCase(unittest.TestCase):
""" Testing TabsEditingDescriptor"""
def setUp(self):
super(TabsEditingDescriptorTestCase, self).setUp()
system = get_test_descriptor_system()
system.render_template = Mock(return_value="<div>Test Template HTML</div>")
self.tabs = [
{
'name': "Test_css",
'template': "tabs/codemirror-edit.html",
'current': True,
'css': {
'scss': [resource_string(__name__,
'../../test_files/test_tabseditingdescriptor.scss')],
'css': [resource_string(__name__,
'../../test_files/test_tabseditingdescriptor.css')]
}
},
{
'name': "Subtitles",
'template': "video/subtitles.html",
},
{
'name': "Settings",
'template': "tabs/video-metadata-edit-tab.html"
}
]
TabsEditingDescriptor.tabs = self.tabs
self.descriptor = system.construct_xblock_from_class(
TabsEditingDescriptor,
scope_ids=ScopeIds(None, None, None, Location('org', 'course', 'run', 'category', 'name', 'revision')),
field_data=DictFieldData({}),
)
def test_get_css(self):
"""test get_css"""
css = self.descriptor.get_css()
test_files_dir = os.path.dirname(__file__).replace('xmodule/tests', 'test_files')
test_css_file = os.path.join(test_files_dir, 'test_tabseditingdescriptor.scss')
with open(test_css_file) as new_css:
added_css = new_css.read()
self.assertEqual(css['scss'].pop(), added_css)
self.assertEqual(css['css'].pop(), added_css)
def test_get_context(self):
""""test get_context"""
rendered_context = self.descriptor.get_context()
self.assertListEqual(rendered_context['tabs'], self.tabs)
| agpl-3.0 | -3,890,044,568,305,784,300 | 34.661765 | 115 | 0.583918 | false |
nitin-cherian/LifeLongLearning | Python/Experiments/JINJA/RealPython/jinja_env/lib/python3.5/site-packages/flask/sessions.py | 119 | 14394 | # -*- coding: utf-8 -*-
"""
flask.sessions
~~~~~~~~~~~~~~
Implements cookie based sessions based on itsdangerous.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import uuid
import hashlib
from base64 import b64encode, b64decode
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import Markup, json
from ._compat import iteritems, text_type
from .helpers import total_seconds
from itsdangerous import URLSafeTimedSerializer, BadSignature
class SessionMixin(object):
"""Expands a basic dictionary with an accessors that are expected
by Flask extensions and users for the session.
"""
def _get_permanent(self):
return self.get('_permanent', False)
def _set_permanent(self, value):
self['_permanent'] = bool(value)
#: this reflects the ``'_permanent'`` key in the dict.
permanent = property(_get_permanent, _set_permanent)
del _get_permanent, _set_permanent
#: some session backends can tell you if a session is new, but that is
#: not necessarily guaranteed. Use with caution. The default mixin
#: implementation just hardcodes ``False`` in.
new = False
#: for some backends this will always be ``True``, but some backends will
#: default this to false and detect changes in the dictionary for as
#: long as changes do not happen on mutable structures in the session.
#: The default mixin implementation just hardcodes ``True`` in.
modified = True
def _tag(value):
if isinstance(value, tuple):
return {' t': [_tag(x) for x in value]}
elif isinstance(value, uuid.UUID):
return {' u': value.hex}
elif isinstance(value, bytes):
return {' b': b64encode(value).decode('ascii')}
elif callable(getattr(value, '__html__', None)):
return {' m': text_type(value.__html__())}
elif isinstance(value, list):
return [_tag(x) for x in value]
elif isinstance(value, datetime):
return {' d': http_date(value)}
elif isinstance(value, dict):
return dict((k, _tag(v)) for k, v in iteritems(value))
elif isinstance(value, str):
try:
return text_type(value)
except UnicodeError:
from flask.debughelpers import UnexpectedUnicodeError
raise UnexpectedUnicodeError(u'A byte string with '
u'non-ASCII data was passed to the session system '
u'which can only store unicode strings. Consider '
u'base64 encoding your string (String was %r)' % value)
return value
class TaggedJSONSerializer(object):
"""A customized JSON serializer that supports a few extra types that
we take for granted when serializing (tuples, markup objects, datetime).
"""
def dumps(self, value):
return json.dumps(_tag(value), separators=(',', ':'))
LOADS_MAP = {
' t': tuple,
' u': uuid.UUID,
' b': b64decode,
' m': Markup,
' d': parse_date,
}
def loads(self, value):
def object_hook(obj):
if len(obj) != 1:
return obj
the_key, the_value = next(iteritems(obj))
# Check the key for a corresponding function
return_function = self.LOADS_MAP.get(the_key)
if return_function:
# Pass the value to the function
return return_function(the_value)
# Didn't find a function for this object
return obj
return json.loads(value, object_hook=object_hook)
session_json_serializer = TaggedJSONSerializer()
class SecureCookieSession(CallbackDict, SessionMixin):
"""Base class for sessions based on signed cookies."""
def __init__(self, initial=None):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.modified = False
class NullSession(SecureCookieSession):
"""Class used to generate nicer error messages if sessions are not
available. Will still allow read-only access to the empty session
but fail on setting.
"""
def _fail(self, *args, **kwargs):
raise RuntimeError('The session is unavailable because no secret '
'key was set. Set the secret_key on the '
'application to something unique and secret.')
__setitem__ = __delitem__ = clear = pop = popitem = \
update = setdefault = _fail
del _fail
class SessionInterface(object):
"""The basic interface you have to implement in order to replace the
default session interface which uses werkzeug's securecookie
implementation. The only methods you have to implement are
:meth:`open_session` and :meth:`save_session`, the others have
useful defaults which you don't need to change.
The session object returned by the :meth:`open_session` method has to
provide a dictionary like interface plus the properties and methods
from the :class:`SessionMixin`. We recommend just subclassing a dict
and adding that mixin::
class Session(dict, SessionMixin):
pass
If :meth:`open_session` returns ``None`` Flask will call into
:meth:`make_null_session` to create a session that acts as replacement
if the session support cannot work because some requirement is not
fulfilled. The default :class:`NullSession` class that is created
will complain that the secret key was not set.
To replace the session interface on an application all you have to do
is to assign :attr:`flask.Flask.session_interface`::
app = Flask(__name__)
app.session_interface = MySessionInterface()
.. versionadded:: 0.8
"""
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
#: :meth:`is_null_session` method will perform a typecheck against
#: this type.
null_session_class = NullSession
#: A flag that indicates if the session interface is pickle based.
#: This can be used by Flask extensions to make a decision in regards
#: to how to deal with the session object.
#:
#: .. versionadded:: 0.10
pickle_based = False
def make_null_session(self, app):
"""Creates a null session which acts as a replacement object if the
real session support could not be loaded due to a configuration
error. This mainly aids the user experience because the job of the
null session is to still support lookup without complaining but
modifications are answered with a helpful error message of what
failed.
This creates an instance of :attr:`null_session_class` by default.
"""
return self.null_session_class()
def is_null_session(self, obj):
"""Checks if a given object is a null session. Null sessions are
not asked to be saved.
This checks if the object is an instance of :attr:`null_session_class`
by default.
"""
return isinstance(obj, self.null_session_class)
def get_cookie_domain(self, app):
"""Helpful helper method that returns the cookie domain that should
be used for the session cookie if session cookies are used.
"""
if app.config['SESSION_COOKIE_DOMAIN'] is not None:
return app.config['SESSION_COOKIE_DOMAIN']
if app.config['SERVER_NAME'] is not None:
# chop off the port which is usually not supported by browsers
rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
# Google chrome does not like cookies set to .localhost, so
# we just go with no domain then. Flask documents anyways that
# cross domain cookies need a fully qualified domain name
if rv == '.localhost':
rv = None
# If we infer the cookie domain from the server name we need
# to check if we are in a subpath. In that case we can't
# set a cross domain cookie.
if rv is not None:
path = self.get_cookie_path(app)
if path != '/':
rv = rv.lstrip('.')
return rv
def get_cookie_path(self, app):
"""Returns the path for which the cookie should be valid. The
default implementation uses the value from the ``SESSION_COOKIE_PATH``
config var if it's set, and falls back to ``APPLICATION_ROOT`` or
uses ``/`` if it's ``None``.
"""
return app.config['SESSION_COOKIE_PATH'] or \
app.config['APPLICATION_ROOT'] or '/'
def get_cookie_httponly(self, app):
"""Returns True if the session cookie should be httponly. This
currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
config var.
"""
return app.config['SESSION_COOKIE_HTTPONLY']
def get_cookie_secure(self, app):
"""Returns True if the cookie should be secure. This currently
just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
"""
return app.config['SESSION_COOKIE_SECURE']
def get_expiration_time(self, app, session):
"""A helper method that returns an expiration date for the session
or ``None`` if the session is linked to the browser session. The
default implementation returns now + the permanent session
lifetime configured on the application.
"""
if session.permanent:
return datetime.utcnow() + app.permanent_session_lifetime
def should_set_cookie(self, app, session):
"""Indicates whether a cookie should be set now or not. This is
used by session backends to figure out if they should emit a
set-cookie header or not. The default behavior is controlled by
the ``SESSION_REFRESH_EACH_REQUEST`` config variable. If
it's set to ``False`` then a cookie is only set if the session is
modified, if set to ``True`` it's always set if the session is
permanent.
This check is usually skipped if sessions get deleted.
.. versionadded:: 0.11
"""
if session.modified:
return True
save_each = app.config['SESSION_REFRESH_EACH_REQUEST']
return save_each and session.permanent
def open_session(self, app, request):
"""This method has to be implemented and must either return ``None``
in case the loading failed because of a configuration error or an
instance of a session object which implements a dictionary like
interface + the methods and attributes on :class:`SessionMixin`.
"""
raise NotImplementedError()
def save_session(self, app, session, response):
"""This is called for actual sessions returned by :meth:`open_session`
at the end of the request. This is still called during a request
context so if you absolutely need access to the request you can do
that.
"""
raise NotImplementedError()
class SecureCookieSessionInterface(SessionInterface):
"""The default session interface that stores sessions in signed cookies
through the :mod:`itsdangerous` module.
"""
#: the salt that should be applied on top of the secret key for the
#: signing of cookie based sessions.
salt = 'cookie-session'
#: the hash function to use for the signature. The default is sha1
digest_method = staticmethod(hashlib.sha1)
#: the name of the itsdangerous supported key derivation. The default
#: is hmac.
key_derivation = 'hmac'
#: A python serializer for the payload. The default is a compact
#: JSON derived serializer with support for some extra Python types
#: such as datetime objects or tuples.
serializer = session_json_serializer
session_class = SecureCookieSession
def get_signing_serializer(self, app):
if not app.secret_key:
return None
signer_kwargs = dict(
key_derivation=self.key_derivation,
digest_method=self.digest_method
)
return URLSafeTimedSerializer(app.secret_key, salt=self.salt,
serializer=self.serializer,
signer_kwargs=signer_kwargs)
def open_session(self, app, request):
s = self.get_signing_serializer(app)
if s is None:
return None
val = request.cookies.get(app.session_cookie_name)
if not val:
return self.session_class()
max_age = total_seconds(app.permanent_session_lifetime)
try:
data = s.loads(val, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
# Delete case. If there is no session we bail early.
# If the session was modified to be empty we remove the
# whole cookie.
if not session:
if session.modified:
response.delete_cookie(app.session_cookie_name,
domain=domain, path=path)
return
# Modification case. There are upsides and downsides to
# emitting a set-cookie header each request. The behavior
# is controlled by the :meth:`should_set_cookie` method
# which performs a quick check to figure out if the cookie
# should be set or not. This is controlled by the
# SESSION_REFRESH_EACH_REQUEST config flag as well as
# the permanent flag on the session itself.
if not self.should_set_cookie(app, session):
return
httponly = self.get_cookie_httponly(app)
secure = self.get_cookie_secure(app)
expires = self.get_expiration_time(app, session)
val = self.get_signing_serializer(app).dumps(dict(session))
response.set_cookie(app.session_cookie_name, val,
expires=expires, httponly=httponly,
domain=domain, path=path, secure=secure)
| mit | 60,907,406,748,023,920 | 38.327869 | 78 | 0.636098 | false |
BjerknesClimateDataCentre/QuinCe | external_scripts/NRT/configure_nrt.py | 2 | 5398 | import os, logging
import toml, json
import urllib.error
import base64
from tabulate import tabulate
# Local modules
import quince, nrtdb, nrtftp
import RetrieverFactory, PreprocessorFactory
# Extract the list of IDs from a set of instruments
def get_ids(instruments):
result = []
for instrument in instruments:
result.append(instrument["id"])
return result
# Draw the table of instruments
def make_instrument_table(instruments, ids, showType):
table_data = []
if showType:
table_data = [["ID", "Name", "Owner", "Type", "Preprocessor"]]
else:
table_data = [["ID", "Name", "Owner"]]
for instrument in instruments:
draw = True
if ids is not None and instrument["id"] not in ids:
draw = False
if draw:
if showType:
table_data.append([instrument["id"],
instrument["name"],
instrument["owner"],
"None" if instrument["type"] is None
else instrument["type"],
instrument["preprocessor"]])
else:
table_data.append([instrument["id"],
instrument["name"],
instrument["owner"]])
print(tabulate(table_data, headers="firstrow"))
#######################################################
def main():
ftpconn = None
dbconn = None
try:
# Blank logger - sends everything to /dev/null
logging.basicConfig(filename=os.devnull)
logger = logging.getLogger('configure_nrt')
logger.setLevel(level=10)
with open("config.toml", "r") as config_file:
config = toml.loads(config_file.read())
print("Connecting to FTP servers...")
ftpconn = nrtftp.connect_ftp(config["FTP"])
dbconn = nrtdb.get_db_conn(config["Database"]["location"])
print("Getting QuinCe instruments...")
quince_instruments = quince.get_instruments(config)
print("Getting NRT instruments...")
nrt_instruments = nrtdb.get_instruments(dbconn)
# Check that NRT and QuinCe are in sync
quince_ids = get_ids(quince_instruments)
nrt_ids = get_ids(nrt_instruments)
# Remove old instruments from NRT
orphaned_ids = list(set(nrt_ids) - set(quince_ids))
if len(orphaned_ids) > 0:
print("The following instruments are no longer in QuinCe and will be removed:\n")
make_instrument_table(nrt_instruments, orphaned_ids, False)
go = input("Enter Y to proceed, or anything else to quit: ")
if not go.lower() == "y":
exit()
else:
nrtdb.delete_instruments(dbconn, orphaned_ids)
# Add new instruments from QuinCe
new_ids = list(set(quince_ids) - set(nrt_ids))
if len(new_ids) > 0:
print("The following instruments are new in QuinCe and will be added:\n")
make_instrument_table(quince_instruments, new_ids, False)
go = input("Enter Y to proceed, or anything else to quit: ")
if not go.lower() == "y":
exit()
else:
nrtdb.add_instruments(dbconn, quince_instruments, new_ids)
nrtftp.add_instruments(ftpconn, config["FTP"], new_ids)
# Main configuration loop
quit = False
while not quit:
print()
instruments = nrtdb.get_instruments(dbconn)
make_instrument_table(instruments, None, True)
command = input("\nEnter instrument ID to configure, or Q to quit: ").lower()
if command == "q":
quit = True
else:
instrument_id = None
try:
instrument_id = int(command)
except:
pass
if instrument_id is not None:
instrument = nrtdb.get_instrument(dbconn, instrument_id)
if instrument is not None:
retriever = None
print()
print("Current configuration for instrument %d (%s):" % \
(instrument["id"], instrument["name"]))
print()
print("TYPE: %s" % (instrument["type"]))
if instrument["type"] is not None and instrument["config"] is not None:
retriever = RetrieverFactory.get_instance(instrument["type"], \
instrument["id"], logger, json.loads(instrument["config"]))
retriever.print_configuration()
print()
print("PREPROCESSOR: %s" % instrument["preprocessor"])
print()
change = input("Change configuration (y/n)? ").lower()
if change == "y":
new_type = RetrieverFactory.ask_retriever_type()
if new_type is None:
nrtdb.store_configuration(dbconn, instrument["id"], None)
else:
if new_type != instrument["type"]:
retriever = RetrieverFactory.get_new_instance(new_type)
config_ok = False
while not config_ok:
print()
config_ok = retriever.enter_configuration()
print()
preprocessor = PreprocessorFactory.ask_preprocessor()
nrtdb.store_configuration(dbconn, instrument["id"], retriever, preprocessor)
except urllib.error.URLError as e:
print(e)
except urllib.error.HTTPError as e:
print("%s %s" % (e.code, e.reason))
finally:
if dbconn is not None:
nrtdb.close(dbconn)
if ftpconn is not None:
ftpconn.close()
if __name__ == '__main__':
main()
| gpl-3.0 | -3,764,827,567,584,822,000 | 30.202312 | 92 | 0.584661 | false |
odoousers2014/LibrERP | account_invoice_force_number/invoice.py | 2 | 1305 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
class account_invoice(osv.osv):
_inherit = "account.invoice"
_columns = {
'internal_number': fields.char('Invoice Number', size=32, readonly=True, states={'draft':[('readonly',False)]}),
}
account_invoice()
| agpl-3.0 | -4,286,218,238,635,117,600 | 42.5 | 120 | 0.609195 | false |
paltman/nashvegas | nashvegas/management/commands/syncdb.py | 4 | 1185 | from django.core.management import call_command
from django.core.management.commands.syncdb import Command as SyncDBCommand
from optparse import make_option
class Command(SyncDBCommand):
option_list = SyncDBCommand.option_list + (
make_option('--skip-migrations',
action='store_false',
dest='migrations',
default=True,
help='Skip nashvegas migrations, do traditional syncdb'),
)
def handle_noargs(self, **options):
# Run migrations first
if options.get("database"):
databases = [options.get("database")]
else:
databases = None
migrations = options.get('migrations')
if migrations:
call_command(
"upgradedb",
do_execute=True,
databases=databases,
interactive=options.get("interactive"),
verbosity=options.get("verbosity"),
)
# Follow up with a syncdb on anything that wasnt included in migrations
# (this catches things like test-only models)
super(Command, self).handle_noargs(**options)
| mit | -2,027,725,637,822,243,000 | 33.852941 | 79 | 0.58481 | false |
Wilee999/panda3d | direct/src/controls/NonPhysicsWalker.py | 11 | 11774 | """
NonPhysicsWalker.py is for avatars.
A walker control such as this one provides:
- creation of the collision nodes
- handling the keyboard and mouse input for avatar movement
- moving the avatar
it does not:
- play sounds
- play animations
although it does send messeges that allow a listener to play sounds or
animations based on walker events.
"""
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.showbase.InputStateGlobal import inputState
from direct.task.Task import Task
from pandac.PandaModules import *
class NonPhysicsWalker(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory("NonPhysicsWalker")
wantDebugIndicator = base.config.GetBool('want-avatar-physics-indicator', 0)
# Ghost mode overrides this:
slideName = "slide-is-disabled"
# special methods
def __init__(self):
DirectObject.DirectObject.__init__(self)
self.worldVelocity = Vec3.zero()
self.collisionsActive = 0
self.speed=0.0
self.rotationSpeed=0.0
self.slideSpeed=0.0
self.vel=Vec3(0.0, 0.0, 0.0)
self.stopThisFrame = 0
def setWalkSpeed(self, forward, jump, reverse, rotate):
assert self.debugPrint("setWalkSpeed()")
self.avatarControlForwardSpeed=forward
#self.avatarControlJumpForce=jump
self.avatarControlReverseSpeed=reverse
self.avatarControlRotateSpeed=rotate
def getSpeeds(self):
#assert self.debugPrint("getSpeeds()")
return (self.speed, self.rotationSpeed, self.slideSpeed)
def setAvatar(self, avatar):
self.avatar = avatar
if avatar is not None:
pass # setup the avatar
def setAirborneHeightFunc(self, getAirborneHeight):
self.getAirborneHeight = getAirborneHeight
def setWallBitMask(self, bitMask):
self.cSphereBitMask = bitMask
def setFloorBitMask(self, bitMask):
self.cRayBitMask = bitMask
def swapFloorBitMask(self, oldMask, newMask):
self.cRayBitMask = self.cRayBitMask &~ oldMask
self.cRayBitMask |= newMask
if self.cRayNodePath and not self.cRayNodePath.isEmpty():
self.cRayNodePath.node().setFromCollideMask(self.cRayBitMask)
def initializeCollisions(self, collisionTraverser, avatarNodePath,
avatarRadius = 1.4, floorOffset = 1.0, reach = 1.0):
"""
Set up the avatar for collisions
"""
assert not avatarNodePath.isEmpty()
self.cTrav = collisionTraverser
self.avatarNodePath = avatarNodePath
# Set up the collision sphere
# This is a sphere on the ground to detect barrier collisions
self.cSphere = CollisionSphere(0.0, 0.0, 0.0, avatarRadius)
cSphereNode = CollisionNode('NPW.cSphereNode')
cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = avatarNodePath.attachNewNode(cSphereNode)
cSphereNode.setFromCollideMask(self.cSphereBitMask)
cSphereNode.setIntoCollideMask(BitMask32.allOff())
# Set up the collison ray
# This is a ray cast from your head down to detect floor polygons.
# This ray start is arbitrarily high in the air. Feel free to use
# a higher or lower value depending on whether you want an avatar
# that is outside of the world to step up to the floor when they
# get under valid floor:
self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
cRayNode = CollisionNode('NPW.cRayNode')
cRayNode.addSolid(self.cRay)
self.cRayNodePath = avatarNodePath.attachNewNode(cRayNode)
cRayNode.setFromCollideMask(self.cRayBitMask)
cRayNode.setIntoCollideMask(BitMask32.allOff())
# set up wall collision mechanism
self.pusher = CollisionHandlerPusher()
self.pusher.setInPattern("enter%in")
self.pusher.setOutPattern("exit%in")
# set up floor collision mechanism
self.lifter = CollisionHandlerFloor()
self.lifter.setInPattern("on-floor")
self.lifter.setOutPattern("off-floor")
self.lifter.setOffset(floorOffset)
self.lifter.setReach(reach)
# Limit our rate-of-fall with the lifter.
# If this is too low, we actually "fall" off steep stairs
# and float above them as we go down. I increased this
# from 8.0 to 16.0 to prevent this
self.lifter.setMaxVelocity(16.0)
self.pusher.addCollider(self.cSphereNodePath, avatarNodePath)
self.lifter.addCollider(self.cRayNodePath, avatarNodePath)
# activate the collider with the traverser and pusher
self.setCollisionsActive(1)
def deleteCollisions(self):
del self.cTrav
del self.cSphere
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
del self.cRay
self.cRayNodePath.removeNode()
del self.cRayNodePath
del self.pusher
del self.lifter
def setTag(self, key, value):
self.cSphereNodePath.setTag(key, value)
def setCollisionsActive(self, active = 1):
assert self.debugPrint("setCollisionsActive(active%s)"%(active,))
if self.collisionsActive != active:
self.collisionsActive = active
if active:
self.cTrav.addCollider(self.cSphereNodePath, self.pusher)
self.cTrav.addCollider(self.cRayNodePath, self.lifter)
else:
self.cTrav.removeCollider(self.cSphereNodePath)
self.cTrav.removeCollider(self.cRayNodePath)
# Now that we have disabled collisions, make one more pass
# right now to ensure we aren't standing in a wall.
self.oneTimeCollide()
def placeOnFloor(self):
"""
Make a reasonable effor to place the avatar on the ground.
For example, this is useful when switching away from the
current walker.
"""
# With these on, getAirborneHeight is not returning the correct value so
# when we open our book while swimming we pop down underneath the ground
# self.oneTimeCollide()
# self.avatarNodePath.setZ(self.avatarNodePath.getZ()-self.getAirborneHeight())
# Since this is the non physics walker - wont they already be on the ground?
return
def oneTimeCollide(self):
"""
Makes one quick collision pass for the avatar, for instance as
a one-time straighten-things-up operation after collisions
have been disabled.
"""
tempCTrav = CollisionTraverser("oneTimeCollide")
tempCTrav.addCollider(self.cSphereNodePath, self.pusher)
tempCTrav.addCollider(self.cRayNodePath, self.lifter)
tempCTrav.traverse(render)
def addBlastForce(self, vector):
pass
def displayDebugInfo(self):
"""
For debug use.
"""
onScreenDebug.add("controls", "NonPhysicsWalker")
def _calcSpeeds(self):
# get the button states:
forward = inputState.isSet("forward")
reverse = inputState.isSet("reverse")
turnLeft = inputState.isSet("turnLeft")
turnRight = inputState.isSet("turnRight")
slide = inputState.isSet(self.slideName) or 0
#jump = inputState.isSet("jump")
# Check for Auto-Run
if base.localAvatar.getAutoRun():
forward = 1
reverse = 0
# Determine what the speeds are based on the buttons:
self.speed=(forward and self.avatarControlForwardSpeed or
reverse and -self.avatarControlReverseSpeed)
# Should fSlide be renamed slideButton?
self.slideSpeed=slide and ((reverse and turnLeft and -self.avatarControlReverseSpeed*(0.75)) or
(reverse and turnRight and self.avatarControlReverseSpeed*(0.75)) or
(turnLeft and -self.avatarControlForwardSpeed*(0.75)) or
(turnRight and self.avatarControlForwardSpeed*(0.75)))
self.rotationSpeed=not slide and (
(turnLeft and self.avatarControlRotateSpeed) or
(turnRight and -self.avatarControlRotateSpeed))
def handleAvatarControls(self, task):
"""
Check on the arrow keys and update the avatar.
"""
if not self.lifter.hasContact():
# hack fix for falling through the floor:
messenger.send("walkerIsOutOfWorld", [self.avatarNodePath])
self._calcSpeeds()
if __debug__:
debugRunning = inputState.isSet("debugRunning")
if debugRunning:
self.speed*=4.0
self.slideSpeed*=4.0
self.rotationSpeed*=1.25
if self.wantDebugIndicator:
self.displayDebugInfo()
# How far did we move based on the amount of time elapsed?
dt=ClockObject.getGlobalClock().getDt()
# Check to see if we're moving at all:
if self.speed or self.slideSpeed or self.rotationSpeed:
if self.stopThisFrame:
distance = 0.0
slideDistance = 0.0
rotation = 0.0
self.stopThisFrame = 0
else:
distance = dt * self.speed
slideDistance = dt * self.slideSpeed
rotation = dt * self.rotationSpeed
# Take a step in the direction of our previous heading.
self.vel=Vec3(Vec3.forward() * distance +
Vec3.right() * slideDistance)
if self.vel != Vec3.zero():
# rotMat is the rotation matrix corresponding to
# our previous heading.
rotMat=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
step=rotMat.xform(self.vel)
self.avatarNodePath.setFluidPos(Point3(self.avatarNodePath.getPos()+step))
self.avatarNodePath.setH(self.avatarNodePath.getH()+rotation)
messenger.send("avatarMoving")
else:
self.vel.set(0.0, 0.0, 0.0)
self.__oldPosDelta = self.avatarNodePath.getPosDelta(render)
self.__oldDt = dt
try:
self.worldVelocity = self.__oldPosDelta*(1/self.__oldDt)
except:
# divide by zero
self.worldVelocity = 0
return Task.cont
def doDeltaPos(self):
assert self.debugPrint("doDeltaPos()")
def reset(self):
assert self.debugPrint("reset()")
def getVelocity(self):
return self.vel
def enableAvatarControls(self):
"""
Activate the arrow keys, etc.
"""
assert self.debugPrint("enableAvatarControls")
assert self.collisionsActive
taskName = "AvatarControls-%s"%(id(self),)
# remove any old
taskMgr.remove(taskName)
# spawn the new task
taskMgr.add(self.handleAvatarControls, taskName)
def disableAvatarControls(self):
"""
Ignore the arrow keys, etc.
"""
assert self.debugPrint("disableAvatarControls")
taskName = "AvatarControls-%s"%(id(self),)
taskMgr.remove(taskName)
def flushEventHandlers(self):
if hasattr(self, 'cTrav'):
self.pusher.flush()
self.lifter.flush() # not currently defined or needed
if __debug__:
def debugPrint(self, message):
"""for debugging"""
return self.notify.debug(
str(id(self))+' '+message)
| bsd-3-clause | 5,563,674,779,308,692,000 | 35.909091 | 103 | 0.630967 | false |
dkavraal/typecatcher | typecatcher/html_preview.py | 1 | 5738 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012 Andrew Starr-Bochicchio <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from locale import gettext as _
from gi.repository import Gtk
from typecatcher_lib.helpers import get_media_file
from random import choice
def html_font_view(font=None, text=None):
start_page_icon = get_media_file("typecatcher.svg")
try:
con_icon_name = "nm-no-connection"
con_theme = Gtk.IconTheme.get_default()
con_info = con_theme.lookup_icon(con_icon_name, 64, 0)
con_icon_uri = "file://" + con_info.get_filename()
except AttributeError:
con_icon_uri = get_media_file("no-connection.svg")
try:
installed_icon_name = "gtk-apply"
installed_theme = Gtk.IconTheme.get_default()
installed_info = installed_theme.lookup_icon(installed_icon_name, 64, 0)
installed_icon_uri = "file://" + installed_info.get_filename()
except AttributeError:
installed_icon_uri = get_media_file("installed.svg")
loader = get_media_file("ajax-loader.gif")
text_preview = select_text_preview(text)
html = """
<html>
<head>
<script src="http://ajax.googleapis.com/ajax/libs/webfont/1.4.10/webfont.js"></script>
<style>
body { font-size: 36px; }
#installed { float: right; font-size: 12px; width:50px; text-align:center; display: None; }
textarea { font: inherit; font-size: inherit; border: None; overflow: hidden; outline: none; width: 90%%; height: 100%%; }
#text_preview { display: None; }
#no_connect { text-align: center; display: None; font-size: 18px; }
#start_page { text-align: center; bottom: 0px;}
.wf-loading { height: 100%%; overflow: hidden; background: url(%s) center center no-repeat fixed;}
.wf-loading * { opacity: 0; }
.wf-active body, .wf-inactive body {
-webkit-animation: fade .25s ease;
animation: fade .25s ease;
-webkit-animation-fill-mode: both;
animation-fill-mode: both;
}
@-webkit-keyframes fade {
0%% { display: none; opacity: 0; }
1%% { display: block; }
100%%{ opacity: 1; }
}
@keyframes fade {
0%% { display: none; opacity: 0; }
1%% { display: block; }
100%%{ opacity: 1; }
}
</style>
</head>
<body>
<div id="installed">
<img src="%s" width=64 height=64>
<p>%s</p>
</div>
<div id='no_connect'>
<img src="%s" width=64 height=64 > <h1>%s</h1>
<p>%s</p>
</div>
<div id='text_preview'>
%s
</div>
<div id='start_page'>
<img src="%s" width=128 height=128>
<p>TypeCatcher</p>
</div>
</body>
</html>
""" % (loader, installed_icon_uri, _("Installed"),
con_icon_uri, _("Font not available."),
_("Please check your network connection."), text_preview,
start_page_icon)
return html
def select_text_preview(text):
ipsum = """Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat."""
kafka = _("One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections.")
hgg = _("Far out in the uncharted backwaters of the unfashionable end of the Western Spiral arm of the Galaxy lies a small unregarded yellow sun. Orbiting this at a distance of roughly ninety-eight million miles is an utterly insignificant little blue-green planet...")
ggm = _("Many years later, as he faced the firing squad, Colonel Aureliano Buendia was to remember that distant afternoon when his father took him to discover ice.")
ralph = _("I am an invisible man. No, I am not a spook like those who haunted Edgar Allan Poe; nor am I one of your Hollywood-movie ectoplasms. I am a man of substance, of flesh and bone, fiber and liquids — and I might even be said to possess a mind. I am invisible, understand, simply because people refuse to see me.")
jj = _("Stately, plump Buck Mulligan came from the stairhead, bearing a bowl of lather on which a mirror and a razor lay crossed. A yellow dressinggown, ungirdled, was sustained gently behind him on the mild morning air.")
text_pool = [ipsum, kafka, ggm, hgg, ralph, jj]
if text is None or text == "random":
selected_text = choice(text_pool)
return "<p> %s </p>" % selected_text
elif text == "ipsum":
return "<p> %s </p>" % ipsum
elif text == "kafka":
return "<p> %s </p>" % kafka
elif text == "hgg":
return "<p> %s </p>" % hgg
elif text == "ggm":
return "<p> %s </p>" % ggm
elif text == "ralph":
return "<p> %s </p>" % ralph
elif text == "jj":
return "<p> %s </p>" % jj
elif text == "custom":
return "<textarea> %s </textarea>" % (_('Enter text...'))
| gpl-3.0 | 248,349,567,679,156,740 | 39.111888 | 325 | 0.649059 | false |
arouzrokh1/jon | synth.py | 1 | 1436 | from pyttsx.drivers import _espeak
import ctypes
import wave
import time
import threading
import StringIO
class Synth(object):
_done = False
def __init__(self):
self.rate = _espeak.Initialize(_espeak.AUDIO_OUTPUT_RETRIEVAL, 1000)
assert self.rate != -1, 'could not initialize espeak'
_espeak.SetSynthCallback(self)
self.lock = threading.Lock()
def __call__(self, wav, numsamples, events):
if self._done:
return 0
data = ctypes.string_at(wav, numsamples*2)
if len(data) == 0:
self._done = True
return 0
self.wav.writeframes(data)
return 0
def say(self, say, out):
with self.lock:
self.wav = wave.open(out, 'w')
self.wav.setnchannels(1)
def __call__(self, wav, numsamples, events):
if self._done:
return 0
data = ctypes.string_at(wav, numsamples*2)
if len(data) == 0:
self._done = True
return 0
self.wav.writeframes(data)
return 0
def say(self, say, out):
with self.lock:
self.wav = wave.open(out, 'w')
self.wav.setnchannels(1)
self.wav.setsampwidth(2)
self.wav.setframerate(self.rate)
self._done = False
_espeak.Synth(say)
while not self._done:
time.sleep(0)
self.wav.close()
| apache-2.0 | -7,920,153,173,975,050,000 | 29.553191 | 76 | 0.547354 | false |
pincopallino93/rdfendpoints | lib/rdflib/plugins/parsers/structureddata.py | 6 | 12225 | #!/usr/bin/env python
"""
Extraction parsers for structured data embedded into HTML or XML files.
The former may include RDFa or microdata. The syntax and the extraction
procedures are based on:
* The RDFa specifications: http://www.w3.org/TR/#tr_RDFa
* The microdata specification: http://www.w3.org/TR/microdata/
* The specification of the microdata to RDF conversion:
http://www.w3.org/TR/microdata-rdf/
License: W3C Software License,
http://www.w3.org/Consortium/Legal/copyright-software
Author: Ivan Herman
Copyright: W3C
"""
from rdflib.parser import (
Parser, StringInputSource, URLInputSource, FileInputSource)
try:
import html5lib
assert html5lib
html5lib = True
except ImportError:
import warnings
warnings.warn(
'html5lib not found! RDFa and Microdata ' +
'parsers will not be available.')
html5lib = False
def _get_orig_source(source):
"""
A bit of a hack; the RDFa/microdata parsers need more than what the
upper layers of RDFLib provide...
This method returns the original source references.
"""
if isinstance(source, StringInputSource):
orig_source = source.getByteStream()
elif isinstance(source, URLInputSource):
orig_source = source.url
elif isinstance(source, FileInputSource):
orig_source = source.file.name
source.file.close()
else:
orig_source = source.getByteStream()
baseURI = source.getPublicId()
return (baseURI, orig_source)
def _check_error(graph):
from .pyRdfa import RDFA_Error, ns_rdf
from .pyRdfa.options import ns_dc
for (s, p, o) in graph.triples((None, ns_rdf["type"], RDFA_Error)):
for (x, y, msg) in graph.triples((s, ns_dc["description"], None)):
raise Exception("RDFa parsing Error! %s" % msg)
# This is the parser interface as it would look when called from the
# rest of RDFLib
class RDFaParser(Parser):
"""
Wrapper around the RDFa 1.1 parser. For further details on the RDFa 1.1
processing, see the relevant W3C documents at
http://www.w3.org/TR/#tr_RDFa. RDFa 1.1 is defined for XHTML, HTML5, SVG
and, in general, for any XML language.
Note that the parser can also handle RDFa 1.0 if the extra parameter is
used and/or the input source uses RDFa 1.0 specific @version or DTD-s.
"""
def parse(self, source, graph,
pgraph=None,
media_type="",
rdfa_version=None,
embedded_rdf=False,
vocab_expansion=False, vocab_cache=False):
"""
@param source: one of the input sources that the RDFLib package defined
@type source: InputSource class instance
@param graph: target graph for the triples; output graph, in RDFa spec.
parlance
@type graph: RDFLib Graph
@keyword pgraph: target for error and warning triples; processor graph,
in RDFa spec. parlance. If set to None, these triples are ignored
@type pgraph: RDFLib Graph
@keyword media_type: explicit setting of the preferred media type
(a.k.a. content type) of the the RDFa source. None means the content
type of the HTTP result is used, or a guess is made based on the
suffix of a file
@type media_type: string
@keyword rdfa_version: 1.0 or 1.1. If the value is "", then, by
default, 1.1 is used unless the source has explicit signals to use
1.0 (e.g., using a @version attribute, using a DTD set up for 1.0, etc)
@type rdfa_version: string
@keyword embedded_rdf: some formats allow embedding RDF in other
formats: (X)HTML can contain turtle in a special <script> element,
SVG can have RDF/XML embedded in a <metadata> element. This flag
controls whether those triples should be interpreted and added to
the output graph. Some languages (e.g., SVG) require this, and the
flag is ignored.
@type embedded_rdf: Boolean
@keyword vocab_expansion: whether the RDFa @vocab attribute should
also mean vocabulary expansion (see the RDFa 1.1 spec for further
details)
@type vocab_expansion: Boolean
@keyword vocab_cache: in case vocab expansion is used, whether the
expansion data (i.e., vocabulary) should be cached locally. This
requires the ability for the local application to write on the
local file system
@type vocab_chache: Boolean
"""
if html5lib is False:
raise ImportError(
'html5lib is not installed, cannot use ' +
'RDFa and Microdata parsers.')
(baseURI, orig_source) = _get_orig_source(source)
self._process(graph, pgraph, baseURI, orig_source,
media_type=media_type,
rdfa_version=rdfa_version,
embedded_rdf=embedded_rdf,
vocab_expansion=vocab_expansion, vocab_cache=vocab_cache)
def _process(self, graph, pgraph, baseURI, orig_source,
media_type="",
rdfa_version=None,
embedded_rdf=False,
vocab_expansion=False, vocab_cache=False):
from .pyRdfa import pyRdfa, Options
from rdflib import Graph
processor_graph = pgraph if pgraph is not None else Graph()
self.options = Options(output_processor_graph=True,
embedded_rdf=embedded_rdf,
vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
if media_type is None:
media_type = ""
processor = pyRdfa(self.options,
base=baseURI,
media_type=media_type,
rdfa_version=rdfa_version)
processor.graph_from_source(orig_source, graph=graph, pgraph=processor_graph, rdfOutput=False)
# This may result in an exception if the graph parsing led to an error
_check_error(processor_graph)
class RDFa10Parser(Parser):
"""
This is just a convenience class to wrap around the RDFa 1.0 parser.
"""
def parse(self, source, graph, pgraph=None, media_type=""):
"""
@param source: one of the input sources that the RDFLib package defined
@type source: InputSource class instance
@param graph: target graph for the triples; output graph, in RDFa
spec. parlance
@type graph: RDFLib Graph
@keyword pgraph: target for error and warning triples; processor
graph, in RDFa spec. parlance. If set to None, these triples are
ignored
@type pgraph: RDFLib Graph
@keyword media_type: explicit setting of the preferred media type
(a.k.a. content type) of the the RDFa source. None means the content
type of the HTTP result is used, or a guess is made based on the
suffix of a file
@type media_type: string
@keyword rdfOutput: whether Exceptions should be catched and added,
as triples, to the processor graph, or whether they should be raised.
@type rdfOutput: Boolean
"""
RDFaParser().parse(source, graph, pgraph=pgraph,
media_type=media_type, rdfa_version="1.0")
class MicrodataParser(Parser):
"""
Wrapper around an HTML5 microdata, extracted and converted into RDF. For
the specification of microdata, see the relevant section of the HTML5
spec: http://www.w3.org/TR/microdata/; for the algorithm used to extract
microdata into RDF, see http://www.w3.org/TR/microdata-rdf/.
"""
def parse(self, source, graph, vocab_expansion=False, vocab_cache=False):
"""
@param source: one of the input sources that the RDFLib package defined
@type source: InputSource class instance
@param graph: target graph for the triples; output graph, in RDFa
spec. parlance
@type graph: RDFLib Graph
@keyword vocab_expansion: whether the RDFa @vocab attribute should
also mean vocabulary expansion (see the RDFa 1.1 spec for further
details)
@type vocab_expansion: Boolean
@keyword vocab_cache: in case vocab expansion is used, whether the
expansion data (i.e., vocabulary) should be cached locally. This
requires the ability for the local application to write on the
local file system
@type vocab_chache: Boolean
@keyword rdfOutput: whether Exceptions should be catched and added,
as triples, to the processor graph, or whether they should be raised.
@type rdfOutput: Boolean
"""
if html5lib is False:
raise ImportError(
'html5lib is not installed, cannot use RDFa ' +
'and Microdata parsers.')
(baseURI, orig_source) = _get_orig_source(source)
self._process(graph, baseURI, orig_source,
vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
def _process(self, graph, baseURI, orig_source,
vocab_expansion=False, vocab_cache=False):
from .pyMicrodata import pyMicrodata
processor = pyMicrodata(base=baseURI, vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
processor.graph_from_source(
orig_source, graph=graph, rdfOutput=False)
class StructuredDataParser(Parser):
"""
Convenience parser to extract both RDFa (including embedded Turtle)
and microdata from an HTML file.
It is simply a wrapper around the specific parsers.
"""
def parse(self, source, graph,
pgraph=None,
rdfa_version="",
vocab_expansion=False,
vocab_cache=False,
media_type='text/html'
):
"""
@param source: one of the input sources that the RDFLib package defined
@type source: InputSource class instance
@param graph: target graph for the triples; output graph, in RDFa
spec. parlance
@keyword rdfa_version: 1.0 or 1.1. If the value is "", then, by
default, 1.1 is used unless the source has explicit signals to use 1.0
(e.g., using a @version attribute, using a DTD set up for 1.0, etc)
@type rdfa_version: string
@type graph: RDFLib Graph
@keyword pgraph: target for error and warning triples; processor
graph, in RDFa spec. parlance. If set to None, these triples are
ignored
@type pgraph: RDFLib Graph
@keyword vocab_expansion: whether the RDFa @vocab attribute should
also mean vocabulary expansion (see the RDFa 1.1 spec for further
details)
@type vocab_expansion: Boolean
@keyword vocab_cache: in case vocab expansion is used, whether the
expansion data (i.e., vocabulary) should be cached locally. This
requires the ability for the local application to write on the
local file system
@type vocab_chache: Boolean
@keyword rdfOutput: whether Exceptions should be catched and added,
as triples, to the processor graph, or whether they should be raised.
@type rdfOutput: Boolean
"""
# Note that the media_type argument is ignored, and is here only to avoid an 'unexpected argument' error.
# This parser works for text/html only anyway...
(baseURI, orig_source) = _get_orig_source(source)
if rdfa_version == "" : rdfa_version = "1.1"
RDFaParser()._process(graph, pgraph, baseURI, orig_source,
media_type='text/html',
rdfa_version=rdfa_version,
vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
MicrodataParser()._process(graph, baseURI, orig_source,
vocab_expansion=vocab_expansion,
vocab_cache=vocab_cache)
from .hturtle import HTurtleParser
HTurtleParser()._process(graph, baseURI, orig_source, media_type='text/html')
| apache-2.0 | 3,758,386,639,521,115,600 | 42.97482 | 114 | 0.632638 | false |
nkalodimas/invenio | modules/websession/lib/websession_web_tests.py | 27 | 12821 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebMessage module web tests."""
from invenio.config import CFG_SITE_SECURE_URL
from invenio.testutils import make_test_suite, \
run_test_suite, \
InvenioWebTestCase
class InvenioWebSessionWebTest(InvenioWebTestCase):
"""WebSession web tests."""
def _delete_messages(self):
"""Delete all messages from users inbox"""
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Messages")
self.browser.find_element_by_link_text("Your Messages").click()
self.find_element_by_xpath_with_timeout("//input[@name='del_all' and @value='Delete All']")
self.browser.find_element_by_xpath("//input[@name='del_all' and @value='Delete All']").click()
self.handle_popup_dialog()
self.find_element_by_xpath_with_timeout("//input[@value='Yes']")
self.browser.find_element_by_xpath("//input[@value='Yes']").click()
def test_create_group(self):
"""websession - web test create a group"""
self.browser.get(CFG_SITE_SECURE_URL)
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("create_group")
self.browser.find_element_by_name("create_group").click()
self.fill_textbox(textbox_name="group_name", text="group test")
self.fill_textbox(textbox_name="group_description", text="test")
self.choose_selectbox_option_by_label(selectbox_name="join_policy", label="Visible and open for new members")
self.find_element_by_name_with_timeout("create_button")
self.browser.find_element_by_name("create_button").click()
self.handle_popup_dialog()
self.page_source_test(expected_text='You have successfully created a new group.')
self.find_element_by_xpath_with_timeout("//small")
self.browser.find_element_by_xpath("//small").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.page_source_test(expected_text='You have successfully deleted a group.')
self._delete_messages()
self.logout()
def test_message_group(self):
"""websession - web test send a message to any group"""
self.browser.get(CFG_SITE_SECURE_URL)
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Messages")
self.browser.find_element_by_link_text("Your Messages").click()
self.find_element_by_name_with_timeout("del_all")
self.browser.find_element_by_name("del_all").click()
self.handle_popup_dialog()
self.fill_textbox(textbox_name="search_pattern", text="montague-family")
self.find_element_by_name_with_timeout("search_group")
self.browser.find_element_by_name("search_group").click()
self.choose_selectbox_option_by_label(selectbox_name="names_selected", label="montague-family")
self.find_element_by_name_with_timeout("add_group")
self.browser.find_element_by_name("add_group").click()
self.fill_textbox(textbox_name="msg_subject", text="hello everybody")
self.fill_textbox(textbox_name="msg_body", text="hello")
self.find_element_by_name_with_timeout("send_button")
self.browser.find_element_by_name("send_button").click()
self._delete_messages()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Your Messages")
self.browser.find_element_by_link_text("Your Messages").click()
self.find_element_by_link_text_with_timeout("hello everybody")
self.browser.find_element_by_link_text("hello everybody").click()
self.page_source_test(expected_text='hello')
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.logout()
def test_create_open_group(self):
"""websession - web test create an open group and join it"""
self.browser.get(CFG_SITE_SECURE_URL)
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("create_group")
self.browser.find_element_by_name("create_group").click()
self.fill_textbox(textbox_name="group_name", text="my_friends")
self.fill_textbox(textbox_name="group_description", text="all my friends")
self.choose_selectbox_option_by_label(selectbox_name="join_policy", label="Visible and open for new members")
self.find_element_by_name_with_timeout("create_button")
self.browser.find_element_by_name("create_button").click()
self.handle_popup_dialog()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("join_group")
self.browser.find_element_by_name("join_group").click()
self.choose_selectbox_option_by_label(selectbox_name="grpID", label="my_friends")
self.find_element_by_name_with_timeout("join_button")
self.browser.find_element_by_name("join_button").click()
self.handle_popup_dialog()
self.logout()
# login as hyde
self.login(username="hyde", password="h123yde")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("join_group")
self.browser.find_element_by_name("join_group").click()
self.choose_selectbox_option_by_label(selectbox_name="grpID", label="my_friends")
self.find_element_by_name_with_timeout("join_button")
self.browser.find_element_by_name("join_button").click()
self.handle_popup_dialog()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("leave")
self.browser.find_element_by_name("leave").click()
self.choose_selectbox_option_by_label(selectbox_name="grpID", label="my_friends")
self.find_element_by_name_with_timeout("leave_button")
self.browser.find_element_by_name("leave_button").click()
self.handle_popup_dialog()
self.find_element_by_name_with_timeout("leave_button")
self.browser.find_element_by_name("leave_button").click()
self.logout()
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_xpath_with_timeout("//small")
self.browser.find_element_by_xpath("//small").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self._delete_messages()
self.logout()
# login as hyde
self.login(username="hyde", password="h123yde")
self._delete_messages()
self.logout()
def test_set_group(self):
"""websession - web test set group"""
self.browser.get(CFG_SITE_SECURE_URL)
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Personalize")
self.browser.find_element_by_link_text("Personalize").click()
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("create_group")
self.browser.find_element_by_name("create_group").click()
self.fill_textbox(textbox_name="group_name", text="my_friends")
self.fill_textbox(textbox_name="group_description", text="all my friends")
self.choose_selectbox_option_by_label(selectbox_name="join_policy", label="Visible but new members need approval")
self.find_element_by_name_with_timeout("create_button")
self.browser.find_element_by_name("create_button").click()
self.handle_popup_dialog()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_name_with_timeout("join_group")
self.browser.find_element_by_name("join_group").click()
self.fill_textbox(textbox_name="group_name", text="my_friends")
self.find_element_by_name_with_timeout("join_button")
self.browser.find_element_by_name("join_button").click()
self.handle_popup_dialog()
self.choose_selectbox_option_by_label(selectbox_name="grpID", label="my_friends")
self.find_element_by_name_with_timeout("join_button")
self.browser.find_element_by_name("join_button").click()
self.logout()
# login as juliet
self.login(username="juliet", password="j123uliet")
self.find_element_by_link_text_with_timeout("Your Messages")
self.browser.find_element_by_link_text("Your Messages").click()
self.find_element_by_link_text_with_timeout("Group my_friends: New membership request")
self.browser.find_element_by_link_text("Group my_friends: New membership request").click()
self.find_element_by_link_text_with_timeout("accept or reject")
self.browser.find_element_by_link_text("accept or reject").click()
self.choose_selectbox_option_by_label(selectbox_name="pending_member_id", label="romeo")
self.find_element_by_name_with_timeout("add_member")
self.browser.find_element_by_name("add_member").click()
self.find_element_by_link_text_with_timeout("Your Groups")
self.browser.find_element_by_link_text("Your Groups").click()
self.find_element_by_xpath_with_timeout("//small")
self.browser.find_element_by_xpath("//small").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self.find_element_by_name_with_timeout("delete")
self.browser.find_element_by_name("delete").click()
self._delete_messages()
self.logout()
# login as romeo
self.login(username="romeo", password="r123omeo")
self._delete_messages()
self.logout()
TEST_SUITE = make_test_suite(InvenioWebSessionWebTest, )
if __name__ == '__main__':
run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 | 1,825,924,449,962,784,500 | 51.979339 | 122 | 0.663365 | false |
mozilla-metrics/socorro-toolbox | src/main/python/graphics-correlations.py | 1 | 6775 | from optparse import OptionParser
import psycopg2
import os, sys
import csv
import re
from collections import namedtuple
p = OptionParser(usage='usage: %prog [options] outfile')
p.add_option('-V', '--product-version', dest='productversion',
help='Firefox version', default=None)
p.add_option('-s', '--start-date', dest='startdate',
help='Start date (YYYY-MM-DD)', default=None)
p.add_option('-e', '--end-date', dest='enddate',
help='End date (YYYY-MM-DD)', default=None)
p.add_option('-c', '--cutoff', dest='cutoff',
help="Minimum cutoff", default=50)
opts, args = p.parse_args()
if len(args) != 1 or opts.productversion is None or opts.startdate is None or opts.enddate is None:
p.error("Required arguments missing")
sys.exit(1)
datere = re.compile(r'\d{4}-\d{2}-\d{2}$')
if datere.match(opts.startdate) is None or datere.match(opts.enddate) is None:
p.print_usage()
sys.exit(1)
outpattern, = args
connectionstring = open(os.path.expanduser('~/socorro.connection'), 'r').read().strip()
conn = psycopg2.connect(connectionstring)
cur = conn.cursor()
cur.execute('''
SELECT product_version_id
FROM product_versions
WHERE product_name = 'Firefox' AND version_string = %s''', (opts.productversion,))
if cur.rowcount != 1:
print >>sys.stderr, "More than one build has version '%s'" % opts.productversion
sys.exit(2)
productversion, = cur.fetchone()
# Note: the startdate/enddate are substituted by python into the query string.
# This is required so that the partition optimizer can exclude irrelevant
# partitions. The product version is passed as a bound parameter.
byvendorq = """
WITH BySignatureGraphics AS (
SELECT
signature_id,
substring(r.app_notes from E'AdapterVendorID: (?:0x)?(\\\\w+)') AS AdapterVendorID,
COUNT(*) AS c
FROM
reports r JOIN reports_clean rc ON rc.uuid = r.uuid
WHERE
r.date_processed > '%(startdate)s'
AND r.date_processed < '%(enddate)s'
AND rc.date_processed > '%(startdate)s'
AND rc.date_processed < '%(enddate)s'
AND r.app_notes ~ E'AdapterVendorID: (?:0x)?(\\\\w+)'
AND r.os_name ~ 'Windows NT'
AND r.hangid IS NULL
AND rc.product_version_id = %%(product_version_id)s
GROUP BY signature_id, AdapterVendorID
),
BySignature AS (
SELECT
signature_id,
SUM(c) AS total_by_signature
FROM BySignatureGraphics
GROUP BY signature_id
),
ByGraphics AS (
SELECT
AdapterVendorID,
SUM(c) AS total_by_graphics
FROM BySignatureGraphics
GROUP BY AdapterVendorID
),
AllCrashes AS (
SELECT SUM(c) AS grand_total
FROM BySignatureGraphics
),
Correlations AS (
SELECT
s.signature,
bsg.AdapterVendorID,
bsg.c,
total_by_signature,
total_by_graphics,
(bsg.c::float * grand_total::float) /
(total_by_signature::float * total_by_graphics::float) AS correlation,
grand_total
FROM
BySignatureGraphics AS bsg
JOIN BySignature ON BySignature.signature_id = bsg.signature_id
JOIN ByGraphics ON
ByGraphics.AdapterVendorID = bsg.AdapterVendorID
CROSS JOIN AllCrashes
JOIN signatures AS s
ON bsg.signature_id = s.signature_id
WHERE
bsg.c > 25
AND total_by_signature > %%(cutoff)s
AND bsg.AdapterVendorID != '0000'
),
HighCorrelation AS (
SELECT
signature,
MAX(correlation) AS highcorrelation
FROM Correlations
GROUP BY signature
)
SELECT
Correlations.*
FROM
Correlations
JOIN HighCorrelation ON
Correlations.signature = HighCorrelation.signature
""" % {'startdate': opts.startdate, 'enddate': opts.enddate}
bycpufamilyq = """
WITH BySignatureCPU AS (
SELECT
signature_id,
substring(r.cpu_info FROM E'\\\\A(\\\\w+ family \\\\d+) model \\\\d+ stepping \\\\d+( \\\\| \\\\d+)?\\\\Z') AS cpufamily,
COUNT(*) AS c
FROM
reports r JOIN reports_clean rc on rc.uuid = r.uuid
WHERE
r.date_processed > '%(startdate)s'
AND r.date_processed < '%(enddate)s'
AND rc.date_processed > '%(startdate)s'
AND rc.date_processed < '%(enddate)s'
AND r.os_name ~ 'Windows NT'
AND r.hangid IS NULL
AND rc.product_version_id = %%(product_version_id)s
AND r.cpu_info ~ E'\\\\A(\\\\w+ family \\\\d+) model \\\\d+ stepping \\\\d+( \\\\| \\\\d+)?\\\\Z'
GROUP BY signature_id, cpufamily
),
BySignature AS (
SELECT
signature_id,
SUM(c) AS total_by_signature
FROM BySignatureCPU
GROUP BY signature_id
),
ByCPU AS (
SELECT
cpufamily,
SUM(c) AS total_by_cpu
FROM BySignatureCPU
GROUP BY cpufamily
),
AllCrashes AS (
SELECT SUM(c) AS grand_total
FROM BySignatureCPU
),
Correlations AS (
SELECT
s.signature,
bsg.cpufamily,
bsg.c,
total_by_signature,
total_by_cpu,
(bsg.c::float * grand_total::float) /
(total_by_signature::float * total_by_cpu::float) AS correlation,
grand_total
FROM
BySignatureCPU AS bsg
JOIN BySignature ON BySignature.signature_id = bsg.signature_id
JOIN ByCPU ON
ByCPU.cpufamily = bsg.cpufamily
CROSS JOIN AllCrashes
JOIN signatures AS s
ON bsg.signature_id = s.signature_id
WHERE
total_by_signature > %%(cutoff)s
),
HighCorrelation AS (
SELECT
signature,
MAX(correlation) AS highcorrelation
FROM Correlations
GROUP BY signature
)
SELECT
Correlations.*
FROM
Correlations
JOIN HighCorrelation ON
Correlations.signature = HighCorrelation.signature
WHERE highcorrelation > 2
""" % {'startdate': opts.startdate, 'enddate': opts.enddate}
Result = namedtuple('Result', ('signature', 'vendorid', 'c', 'bysig', 'bygraphics', 'grandtotal', 'correlation'))
def savedata(cur, filename):
results = []
for signature, vendorid, c, bysig, bygraphics, correlation, grandtotal in cur:
# if bygraph < 50:
# continue
# Also, if less than 20 (SWAG!) unique users have experienced this crash,
# the correlation is going to be really noisy and probably meaningless.
# if correlation < 4:
# continue
results.append(Result(signature, vendorid, c, bysig, bygraphics, grandtotal, correlation))
results.sort(key=lambda r: (r.bysig, r.signature, r.correlation), reverse=True)
fd = open(filename, 'w')
w = csv.writer(fd)
w.writerow(('signature', 'vendorid', 'c', 'bysig', 'bygraphics', 'grandtotal', 'correlation'))
for r in results:
correlation = "{0:.2f}".format(r.correlation)
w.writerow((r.signature, r.vendorid, r.c, r.bysig, r.bygraphics, r.grandtotal, correlation))
fd.close()
params = {
'product_version_id': productversion,
'cutoff': opts.cutoff
}
cur.execute(byvendorq, params)
savedata(cur, outpattern + 'byadaptervendor.csv')
cur.execute(bycpufamilyq, params)
savedata(cur, outpattern + 'bycpufamily.csv')
| apache-2.0 | -4,394,518,590,229,906,400 | 27.829787 | 125 | 0.671144 | false |
mdtraj/mdtraj | mdtraj/__init__.py | 6 | 2628 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""The mdtraj package contains tools for loading and saving molecular dynamics
trajectories in a variety of formats, including Gromacs XTC & TRR, CHARMM/NAMD
DCD, AMBER BINPOS, PDB, and HDF5.
"""
import numpy as _ # silence cython related numpy warnings, see github.com/numpy/numpy/pull/432
from .formats.registry import FormatRegistry
from .formats.xtc import load_xtc
from .formats.trr import load_trr
from .formats.hdf5 import load_hdf5
from .formats.lh5 import load_lh5
from .formats.netcdf import load_netcdf
from .formats.mdcrd import load_mdcrd
from .formats.dcd import load_dcd
from .formats.binpos import load_binpos
from .formats.pdb import load_pdb
from .formats.arc import load_arc
from .formats.openmmxml import load_xml
from .formats.prmtop import load_prmtop
from .formats.psf import load_psf
from .formats.mol2 import load_mol2
from .formats.amberrst import load_restrt, load_ncrestrt
from .formats.lammpstrj import load_lammpstrj
from .formats.dtr import load_dtr, load_stk
from .formats.xyzfile import load_xyz
from .formats.hoomdxml import load_hoomdxml
from .formats.tng import load_tng
from .core import element
from ._rmsd import rmsd, rmsf
from ._lprmsd import lprmsd
from .core.topology import Topology, Single, Double, Triple, Amide, Aromatic
from .geometry import *
from .core.trajectory import *
from .nmr import *
from . import reporters
def capi():
import os
import sys
module_path = sys.modules['mdtraj'].__path__[0]
return {
'lib_dir': os.path.join(module_path, 'core', 'lib'),
'include_dir': os.path.join(module_path, 'core', 'lib'),
}
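# Illustrative usage of the package (not executed here; file names are
# placeholders and the loader format is picked from the file extension):
#
#   import mdtraj as md
#   t = md.load("trajectory.xtc", top="topology.pdb")
#   r = md.rmsd(t, t, frame=0)   # RMSD of every frame against frame 0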
| lgpl-2.1 | -5,419,075,364,981,252,000 | 36.542857 | 95 | 0.714612 | false |
decebel/dataAtom_alpha | bin/plug/py/external/pattern/text/search.py | 1 | 41884 | #### PATTERN | EN | PATTERN MATCHING ###############################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import re
import itertools
# Following classes emulate those in en.parser.tree:
class Text(list):
def __init__(self, string="", token=["word"]):
list.__init__(self, [Sentence(s+".", token) for s in string.split(".")])
@property
def sentences(self):
return self
class Sentence:
def __init__(self, string="", token=["word"]):
for punctuation in ".,;?!:": # Naive tokenization.
string = string.replace(punctuation, " "+punctuation)
string = re.sub(r"\s+", " ", string)
string = string.split(" ")
self.words = [Word(self, w, index=i) for i,w in enumerate(string)]
@property
def chunks(self):
return []
class Word(object):
def __init__(self, sentence, string, tag=None, index=0):
self.sentence, self.string, self.tag, self.index = sentence, string, tag, index
def __repr__(self):
return "Word(%s)" % repr(self.string)
def _get_type(self):
return self.tag
def _set_type(self, v):
self.tag = v
type = property(_get_type, _set_type)
@property
def chunk(self):
return None
@property
def lemma(self):
return None
#--- STRING MATCHING -------------------------------------------------------------------------------
WILDCARD = "*"
regexp = type(re.compile(r"."))
def _match(string, pattern):
""" Returns True if the pattern is part of the given word string.
The pattern can include a wildcard (*front, back*, *both*, in*side),
or it can be a compiled regular expression.
"""
p = pattern
try:
if p.startswith(WILDCARD) and (p.endswith(WILDCARD) and p[1:-1] in string or string.endswith(p[1:])):
return True
if p.endswith(WILDCARD) and not p.endswith("\\"+WILDCARD) and string.startswith(p[:-1]):
return True
if p == string:
return True
if WILDCARD in p[1:-1]:
p = p.split(WILDCARD)
return string.startswith(p[0]) and string.endswith(p[-1])
except AttributeError:
# For performance (doing isinstance() at the end is 10% faster for regular strings).
if isinstance(p, regexp):
return p.search(string) is not None
return False
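# Illustrative behavior of _match() (not executed here):
# _match("rabbits", "rabbit*")             => True  (wildcard at the back)
# _match("overload", "*load")              => True  (wildcard at the front)
# _match("inside", "in*side")              => True  (wildcard in the middle)
# _match("rabbit", re.compile(r"r.bbit"))  => True  (compiled regular expression)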
#--- LIST FUNCTIONS --------------------------------------------------------------------------------
def unique(list):
""" Returns a list copy in which each item occurs only once.
"""
u=[]; [u.append(item) for item in list if item not in u]
return u
def unique2(list):
""" Returns a list copy in which each item occurs only once.
This is faster than unique(), but list items must be hashable.
"""
u, s = [], {}
for item in list:
if item not in s: u.append(item)
s[item] = True
return u
def find(function, list):
""" Returns the first item in the list for which function(item) is True, None otherwise.
"""
for item in list:
if function(item) is True:
return item
def combinations(list, n):
""" Returns all possible combinations of length n of the given items in the list.
"""
if n == 0: yield [] # Only one possibility, the empty list.
else:
for i in xrange(len(list)):
for c in combinations(list, n-1):
yield [list[i]] + c
def variations(list, optional=lambda item: False):
""" Returns all possible variations of a list containing optional items.
"""
# Boolean pattern, True where pattern is optional.
# (A) (B) C --> True True False
o = [optional(item) for item in list]
V = []
# All the possible True/False combinations of optionals.
# (A) (B) C --> True True, True False, False True, False False.
for c in combinations([True, False], sum(o)):
# If True in boolean pattern, replace by boolean in current combination.
# (A) (B) C --> True True False, True False False, False True False, False False False.
v = [b and (b and c.pop(0)) for b in o]
# Replace True by pattern at that index.
# --> (A) (B) C, (A) C, (B) C, C.
v = [list[i] for i in range(len(v)) if not v[i]]
if v not in V:
V.append(v)
# Longest-first.
V.sort(lambda a, b: len(b) - len(a))
return V
#### TAXONOMY ######################################################################################
#--- ORDERED DICTIONARY ----------------------------------------------------------------------------
class odict(dict):
""" Dictionary with ordered keys.
With reversed=True, the latest keys will be returned first when traversing the dictionary.
Taxonomy (see below) uses ordered dictionaries:
if a taxonomy term has multiple parents, the last parent is the default.
"""
def __init__(self, d=None, reversed=True):
dict.__init__(self)
self._o = [] # The ordered keys.
self._f = reversed and self._insertkey or self._appendkey
if d != None: self.update(dict(d))
@property
def reversed(self):
return self._f == self._insertkey
@classmethod
def fromkeys(cls, k, v=None, reversed=True):
d = cls(reversed=reversed)
for k in k: d.__setitem__(k,v)
return d
def _insertkey(self, k):
if k not in self: self._o.insert(0,k) # Sort newest-first with reversed=True.
def _appendkey(self, k):
if k not in self: self._o.append(k) # Sort oldest-first with reversed=False.
def append(self, (k, v)):
""" Takes a (key, value)-tuple. Sets the given key to the given value.
If the key exists, pushes the updated item to the head (or tail) of the dict.
"""
if k in self: self.__delitem__(k)
self.__setitem__(k,v)
def update(self, d):
for k,v in d.items(): self.__setitem__(k,v)
def setdefault(self, k, v=None):
if not k in self: self.__setitem__(k,v)
return self[k]
def __setitem__(self, k, v):
self._f(k); dict.__setitem__(self, k, v)
def __delitem__(self, k):
dict.__delitem__(self, k); self._o.remove(k)
def pop(self, k):
self._o.remove(k); return dict.pop(self, k)
def clear(self):
dict.clear(self); self._o=[]
def keys(self):
return self._o
def values(self):
return map(self.get, self._o)
def items(self):
return zip(self._o, self.values())
def __iter__(self):
return self._o.__iter__()
def copy(self):
d = self.__class__(reversed=self.reversed)
for k,v in (self.reversed and reversed(self.items()) or self.items()): d[k] = v
return d
def __repr__(self):
return "{%s}" % ", ".join(["%s: %s" % (repr(k), repr(v)) for k, v in self.items()])
#--- TAXONOMY --------------------------------------------------------------------------------------
class Taxonomy(dict):
def __init__(self):
""" Hierarchical tree of words classified by semantic type.
For example: "rose" and "daffodil" can be classified as "flower":
taxonomy.append("rose", type="flower")
taxonomy.append("daffodil", type="flower")
print taxonomy.children("flower")
Taxonomy terms can be used in a Pattern:
FLOWER will match "flower" as well as "rose" and "daffodil".
The taxonomy is case insensitive by default.
"""
self.case_sensitive = False
self._values = {}
self.classifiers = []
def _normalize(self, term):
try:
return not self.case_sensitive and term.lower() or term
except: # Not a string.
return term
def __contains__(self, term):
# Check if the term is in the dictionary.
# If the term is not in the dictionary, check the classifiers.
term = self._normalize(term)
if dict.__contains__(self, term):
return True
for classifier in self.classifiers:
if classifier.parents(term) \
or classifier.children(term):
return True
return False
def append(self, term, type=None, value=None):
""" Appends the given term to the taxonomy and tags it as the given type.
Optionally, a disambiguation value can be supplied.
For example: taxonomy.append("many", "quantity", "50-200")
"""
term = self._normalize(term)
type = self._normalize(type)
self.setdefault(term, (odict(), odict()))[0].append((type, True))
self.setdefault(type, (odict(), odict()))[1].append((term, True))
self._values[term] = value
def classify(self, term, **kwargs):
""" Returns the (most recently added) semantic type for the given term ("many" => "quantity").
If the term is not in the dictionary, try Taxonomy.classifiers.
"""
term = self._normalize(term)
if dict.__contains__(self, term):
return self[term][0].keys()[-1]
# If the term is not in the dictionary, check the classifiers.
# Returns the first term in the list returned by a classifier.
for classifier in self.classifiers:
# **kwargs are useful if the classifier requests extra information,
# for example the part-of-speech tag.
v = classifier.parents(term, **kwargs)
if v:
return v[0]
def parents(self, term, recursive=False, **kwargs):
""" Returns a list of all semantic types for the given term.
If recursive=True, traverses parents up to the root.
"""
def dfs(term, recursive=False, visited={}, **kwargs):
if term in visited: # Break on cyclic relations.
return []
visited[term], a = True, []
if dict.__contains__(self, term):
a = self[term][0].keys()
for classifier in self.classifiers:
a.extend(classifier.parents(term, **kwargs) or [])
if recursive:
for w in a: a += dfs(w, recursive, visited, **kwargs)
return a
return unique2(dfs(self._normalize(term), recursive, {}, **kwargs))
def children(self, term, recursive=False, **kwargs):
""" Returns all terms of the given semantic type: "quantity" => ["many", "lot", "few", ...]
If recursive=True, traverses children down to the leaves.
"""
def dfs(term, recursive=False, visited={}, **kwargs):
if term in visited: # Break on cyclic relations.
return []
visited[term], a = True, []
if dict.__contains__(self, term):
a = self[term][1].keys()
for classifier in self.classifiers:
a.extend(classifier.children(term, **kwargs) or [])
if recursive:
for w in a: a += dfs(w, recursive, visited, **kwargs)
return a
return unique2(dfs(self._normalize(term), recursive, {}, **kwargs))
def value(self, term, **kwargs):
""" Returns the value of the given term ("many" => "50-200")
"""
term = self._normalize(term)
if term in self._values:
return self._values[term]
for classifier in self.classifiers:
v = classifier.value(term, **kwargs)
if v is not None:
return v
def remove(self, term):
if dict.__contains__(self, term):
for w in self.parents(term):
self[w][1].pop(term)
dict.pop(self, term)
# Global taxonomy:
TAXONOMY = taxonomy = Taxonomy()
#taxonomy.append("rose", type="flower")
#taxonomy.append("daffodil", type="flower")
#taxonomy.append("flower", type="plant")
#print taxonomy.classify("rose")
#print taxonomy.children("plant", recursive=True)
#c = Classifier(parents=lambda term: term.endswith("ness") and ["quality"] or [])
#taxonomy.classifiers.append(c)
#print taxonomy.classify("roughness")
#--- TAXONOMY CLASSIFIER ---------------------------------------------------------------------------
class Classifier:
def __init__(self, parents=lambda term: [], children=lambda term: [], value=lambda term: None):
""" A classifier uses a rule-based approach to enrich the taxonomy, for example:
c = Classifier(parents=lambda term: term.endswith("ness") and ["quality"] or [])
taxonomy.classifiers.append(c)
This tags any word ending in -ness as "quality".
This is much shorter than manually adding "roughness", "sharpness", ...
Other examples of useful classifiers: calling en.wordnet.Synset.hyponyms() or en.number().
"""
self.parents = parents
self.children = children
self.value = value
# Classifier(parents=lambda word: word.endswith("ness") and ["quality"] or [])
# Classifier(parents=lambda word, chunk=None: chunk=="VP" and [ACTION] or [])
class WordNetClassifier(Classifier):
def __init__(self, wordnet=None):
if wordnet is None:
try: from en import wordnet
except:
pass
Classifier.__init__(self, self._parents, self._children)
self.wordnet = wordnet
def _children(self, word, pos="NN"):
try: return [w.senses[0] for w in self.wordnet.synsets(word, pos)[0].hyponyms()]
except KeyError:
pass
def _parents(self, word, pos="NN"):
try: return [w.senses[0] for w in self.wordnet.synsets(word, pos)[0].hypernyms()]
except KeyError:
pass
#from en import wordnet
#taxonomy.classifiers.append(WordNetClassifier(wordnet))
#print taxonomy.parents("ponder", pos="VB")
#print taxonomy.children("computer")
#### PATTERN #######################################################################################
#--- PATTERN CONSTRAINT ----------------------------------------------------------------------------
# Allowed chunk, role and part-of-speech tags (Penn Treebank II):
CHUNKS = dict.fromkeys(["NP", "PP", "VP", "ADVP", "ADJP", "SBAR", "PRT", "INTJ"], True)
ROLES = dict.fromkeys(["SBJ", "OBJ", "PRD", "TMP", "CLR", "LOC", "DIR", "EXT", "PRP"], True)
TAGS = dict.fromkeys(["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "JJ*",
"LS", "MD", "NN", "NNS", "NNP", "NNPS", "NN*", "PDT", "PRP",
"PRP$", "PRP*", "RB", "RBR", "RBS", "RB*", "RP", "SYM", "TO",
"UH", "VB", "VBZ", "VBP", "VBD", "VBN", "VBG", "VB*",
"WDT", "WP*", "WRB", ".", ",", ":", "(", ")"], True)
ALPHA = re.compile("[a-zA-Z]")
has_alpha = lambda string: ALPHA.match(string) is not None
class Constraint:
def __init__(self, words=[], tags=[], chunks=[], roles=[], taxa=[], optional=False, multiple=False, first=False, taxonomy=TAXONOMY, exclude=None):
""" A range of words, tags and taxonomy terms that matches certain words in a sentence.
For example:
Constraint.fromstring("with|of") matches either "with" or "of".
Constraint.fromstring("(JJ)") optionally matches an adjective.
Constraint.fromstring("NP|SBJ") matches subject noun phrases.
Constraint.fromstring("QUANTITY|QUALITY") matches quantity-type and quality-type taxa.
"""
self.index = 0
self.words = list(words) # Allowed words/lemmata (of, with, ...)
self.tags = list(tags) # Allowed parts-of-speech (NN, JJ, ...)
self.chunks = list(chunks) # Allowed chunk types (NP, VP, ...)
self.roles = list(roles) # Allowed chunk roles (SBJ, OBJ, ...)
self.taxa = list(taxa) # Allowed word categories.
self.taxonomy = taxonomy
self.optional = optional
self.multiple = multiple
self.first = first
self.exclude = exclude # Constraint of words that are *not* allowed, or None.
@classmethod
def fromstring(cls, s, **kwargs):
""" Returns a new Constraint from the given string.
Uppercase words indicate either a tag ("NN", "JJ", "VP")
or a taxonomy term (e.g., "PRODUCT", "PERSON").
Syntax:
( defines an optional constraint, e.g., "(JJ)".
[ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]".
_ is converted to spaces, e.g., "Windows_Vista".
| separates different options, e.g., "ADJP|ADVP".
! can be used as a word prefix to disallow it.
* can be used as a wildcard character, e.g., "soft*|JJ*".
? as a suffix defines a constraint that is optional, e.g., "JJ?".
+ as a suffix defines a constraint that can span multiple words, e.g., "JJ+".
^ as a prefix defines a constraint that can only match the first word.
These characters need to be escaped if used as content: "\(".
"""
C = cls(**kwargs)
s = s.strip()
s = s.strip("{}")
s = s.strip()
for i in range(3):
# Wrapping order of control characters is ignored:
# (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+?
if s.startswith("^"):
s = s[1: ]; C.first = True
if s.endswith("+") and not s.endswith("\+"):
s = s[0:-1]; C.multiple = True
if s.endswith("?") and not s.endswith("\?"):
s = s[0:-1]; C.optional = True
if s.startswith("(") and s.endswith(")"):
s = s[1:-1]; C.optional = True
if s.startswith("[") and s.endswith("]"):
s = s[1:-1]
s = re.sub(r"^\\\^", "^", s)
s = re.sub(r"\\\+$", "+", s)
s = s.replace("\_", "&uscore;")
s = s.replace("_"," ")
s = s.replace("&uscore;", "_")
s = s.replace("&lparen;", "(")
s = s.replace("&rparen;", ")")
s = s.replace("[", "[")
s = s.replace("]", "]")
s = s.replace("&lcurly;", "{")
s = s.replace("&rcurly;", "}")
s = s.replace("\(", "(")
s = s.replace("\)", ")")
s = s.replace("\[", "[")
s = s.replace("\]", "]")
s = s.replace("\{", "{")
s = s.replace("\}", "}")
s = s.replace("\*", "*")
s = s.replace("\?", "?")
s = s.replace("\+", "+")
s = s.replace("\^", "^")
s = s.replace("\|", "⊢")
s = s.split("|")
s = [v.replace("⊢", "|").strip() for v in s]
for v in s:
C._append(v)
return C
def _append(self, v):
if v.startswith("!") and self.exclude is None:
self.exclude = Constraint()
if v.startswith("!"):
self.exclude._append(v[1:]); return
if "!" in v:
v = v.replace("\!", "!")
if v != v.upper():
self.words.append(v.lower())
elif v in TAGS:
self.tags.append(v)
elif v in CHUNKS:
self.chunks.append(v)
elif v in ROLES:
self.roles.append(v)
elif v in self.taxonomy or has_alpha(v):
self.taxa.append(v.lower())
else:
# Uppercase words indicate tags or taxonomy terms.
# However, this also matches "*" or "?" or "0.25".
# Unless such punctuation is defined in the taxonomy, it is added to Range.words.
self.words.append(v.lower())
def match(self, word):
""" Return True if the given Word is part of the constraint:
- the word (or lemma) occurs in Constraint.words, OR
- the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND
- the word and/or chunk tags match those defined in the constraint.
Individual terms in Constraint.words or the taxonomy can contain wildcards (*).
Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB*
If the given word contains spaces (e.g., proper noun),
the entire chunk will also be compared.
For example: Constraint(words=["Mac OS X*"])
matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5").
"""
# If the constraint can only match the first word, Word.index must be 0.
if self.first and word.index > 0:
return False
# If the constraint defines excluded options, Word can not match any of these.
if self.exclude and self.exclude.match(word):
return False
# If the constraint defines allowed tags, Word.tag needs to match one of these.
if self.tags:
if find(lambda w: _match(word.tag, w), self.tags) is None:
return False
# If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these.
if self.chunks:
ch = word.chunk and word.chunk.tag or None
if find(lambda w: _match(ch, w), self.chunks) is None:
return False
# If the constraint defines allowed role, Word.chunk.tag needs to match one of these.
if self.roles:
R = word.chunk and [r2 for r1, r2 in word.chunk.relations] or []
if find(lambda w: w in R, self.roles) is None:
return False
# If the constraint defines allowed words,
# Word.string.lower() OR Word.lemma needs to match one of these.
b = True # b==True when word in constraint (or Constraints.words=[]).
if len(self.words) + len(self.taxa) > 0:
s1 = word.string.lower()
s2 = word.lemma
b = False
for w in itertools.chain(self.words, self.taxa):
# If the constraint has a word with spaces (e.g., a proper noun),
# compare it to the entire chunk.
try:
if " " in w and (s1 in w or s2 and s2 in w or "*" in w):
s1 = word.chunk and word.chunk.string.lower() or s1
s2 = word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or s2
except:
s1 = s1
s2 = None
# Compare the word to the allowed words (which can contain wildcards).
if _match(s1, w):
b=True; break
# Compare the word lemma to the allowed words, e.g.,
# if "was" is not in the constraint, perhaps "be" is, which is a good match.
if s2 and _match(s2, w):
b=True; break
# If the constraint defines allowed taxonomy terms,
# and the given word did not match an allowed word, traverse the taxonomy.
# The search goes up from the given word to its parents in the taxonomy.
# This is faster than traversing all the children of terms in Constraint.taxa.
# The drawback is that:
# 1) Wildcards in the taxonomy are not detected (use classifiers instead),
# 2) Classifier.children() has no effect, only Classifier.parent().
if self.taxa and (not self.words or (self.words and not b)):
for s in (
word.string, # "ants"
word.lemma, # "ant"
word.chunk and word.chunk.string or None, # "army ants"
word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or None): # "army ant"
if s is not None:
if self.taxonomy.case_sensitive is False:
s = s.lower()
# Compare ancestors of the word to each term in Constraint.taxa.
for p in self.taxonomy.parents(s, recursive=True):
if find(lambda s: p==s, self.taxa): # No wildcards.
return True
return b
def __repr__(self):
s = []
for k,v in (
( "words", self.words),
( "tags", self.tags),
("chunks", self.chunks),
( "roles", self.roles),
( "taxa", self.taxa)):
if v: s.append("%s=%s" % (k, repr(v)))
return "Constraint(%s)" % ", ".join(s)
@property
def string(self):
a = self.words + self.tags + self.chunks + self.roles + [w.upper() for w in self.taxa]
a = (escape(s) for s in a)
a = (s.replace("\\*", "*") for s in a)
a = [s.replace(" ", "_") for s in a]
if self.exclude:
a.extend("!"+s for s in self.exclude.string[1:-1].split("|"))
return (self.optional and "%s(%s)%s" or "%s[%s]%s") % (
self.first and "^" or "", "|".join(a), self.multiple and "+" or "")
#--- PATTERN ---------------------------------------------------------------------------------------
STRICT = "strict"
GREEDY = "greedy"
class Pattern:
def __init__(self, sequence=[], *args, **kwargs):
""" A sequence of constraints that matches certain phrases in a sentence.
The given list of Constraint objects can contain nested lists (groups).
"""
# Parse nested lists and tuples from the sequence into groups.
# [DT [JJ NN]] => Match.group(1) will yield the JJ NN sequences.
def _ungroup(sequence, groups=None):
for v in sequence:
if isinstance(v, (list, tuple)):
if groups is not None:
groups.append(list(_ungroup(v, groups=None)))
for v in _ungroup(v, groups):
yield v
else:
yield v
self.groups = []
self.sequence = list(_ungroup(sequence, groups=self.groups))
# Assign Constraint.index:
i = 0
for constraint in self.sequence:
constraint.index = i; i+=1
# There are two search modes: STRICT and GREEDY.
# - In STRICT, "rabbit" matches only the string "rabbit".
# - In GREEDY, "rabbit|NN" matches the string "rabbit" tagged "NN".
# - In GREEDY, "rabbit" matches "the big white rabbit" (the entire chunk is a match).
# - Pattern.greedy(chunk, constraint) determines (True/False) if a chunk is a match.
self.strict = kwargs.get("strict", STRICT in args and not GREEDY in args)
self.greedy = kwargs.get("greedy", lambda chunk, constraint: True)
def __iter__(self):
return iter(self.sequence)
def __len__(self):
return len(self.sequence)
def __getitem__(self, i):
return self.sequence[i]
@classmethod
def fromstring(cls, s, *args, **kwargs):
""" Returns a new Pattern from the given string.
Constraints are separated by a space.
If a constraint contains a space, it must be wrapped in [].
"""
s = s.replace("\(", "&lparen;")
s = s.replace("\)", "&rparen;")
s = s.replace("\[", "[")
s = s.replace("\]", "]")
s = s.replace("\{", "&lcurly;")
s = s.replace("\}", "&rcurly;")
p = []
i = 0
for m in re.finditer(r"\[.*?\]|\(.*?\)", s):
# Spaces in a range encapsulated in square brackets are encoded.
# "[Windows Vista]" is one range, don't split on space.
p.append(s[i:m.start()])
p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end()
p.append(s[i:])
s = "".join(p)
s = s.replace("][", "] [")
s = s.replace(")(", ") (")
s = s.replace("\|", "⊢")
s = re.sub(r"\s+\|\s+", "|", s)
s = re.sub(r"\s+", " ", s)
s = re.sub(r"\{\s+", "{", s)
s = re.sub(r"\s+\}", "}", s)
s = s.split(" ")
s = [v.replace("&space;"," ") for v in s]
P = cls([], *args, **kwargs)
G, O, i = [], [], 0
for s in s:
constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY))
constraint.index = len(P.sequence)
P.sequence.append(constraint)
# Push a new group on the stack if string starts with "{".
# Parse constraint from string, add it to all open groups.
# Pop latest group from stack if string ends with "}".
# Insert groups in opened-first order (i).
while s.startswith("{"):
s = s[1:]
G.append((i, [])); i+=1
O.append([])
for g in G:
g[1].append(constraint)
while s.endswith("}"):
s = s[:-1]
if G: O[G[-1][0]] = G[-1][1]; G.pop()
P.groups = [g for g in O if g]
return P
def search(self, sentence):
""" Returns a list of all matches found in the given sentence.
"""
if sentence.__class__.__name__ == "Text":
a=[]; [a.extend(self.search(s)) for s in sentence]; return a
a = []
v = self._variations()
u = {}
m = self.match(sentence, _v=v)
while m:
a.append(m)
m = self.match(sentence, start=m.words[-1].index+1, _v=v, _u=u)
return a
def match(self, sentence, start=0, _v=None, _u=None):
""" Returns the first match found in the given sentence, or None.
"""
if sentence.__class__.__name__ == "Text":
return find(lambda (m,s): m!=None, ((self.match(s, start, _v), s) for s in sentence))[0]
if isinstance(sentence, basestring):
sentence = Sentence(sentence)
# Variations (_v) further down the list may match words more to the front.
# We need to check all of them. Unmatched variations are blacklisted (_u).
# Pattern.search() calls Pattern.match() with a persistent blacklist (1.5x faster).
a = []
for sequence in (_v is not None and _v or self._variations()):
if _u is not None and id(sequence) in _u:
continue
m = self._match(sequence, sentence, start)
if m is not None:
a.append((m.words[0].index, len(m.words), m))
if m is not None and m.words[0].index == start:
return m
if m is None and _u is not None:
_u[id(sequence)] = False
# Return the leftmost-longest.
if len(a) > 0:
return sorted(a)[0][-1]
def _variations(self):
v = variations(self.sequence, optional=lambda constraint: constraint.optional)
v = sorted(v, key=len, reverse=True)
return v
def _match(self, sequence, sentence, start=0, i=0, w0=None, map=None, d=0):
# Backtracking tree search.
# Finds the first match in the sentence of the given sequence of constraints.
# start : the current word index.
# i : the current constraint index.
# w0 : the first word that matches a constraint.
# map : a dictionary of (Word index, Constraint) items.
# d : recursion depth.
# XXX - We can probably rewrite all of this using (faster) regular expressions.
if map is None:
map = {}
# MATCH
if i == len(sequence):
if w0 is not None:
w1 = sentence.words[start-1]
# Greedy algorithm:
# - "cat" matches "the big cat" if "cat" is head of the chunk.
# - "Tom" matches "Tom the cat" if "Tom" is head of the chunk.
# - This behavior is ignored with POS-tag constraints:
# "Tom|NN" can only match single words, not chunks.
# - This is also True for negated POS-tags (e.g., !NN).
w01 = [w0, w1]
for j in (0, -1):
constraint, w = sequence[j], w01[j]
if self.strict is False and w.chunk is not None:
if len(constraint.tags) == 0:
if constraint.exclude is None or len(constraint.exclude.tags) == 0:
if constraint.match(w.chunk.head):
w01[j] = w.chunk.words[j]
if constraint.exclude and constraint.exclude.match(w.chunk.head):
return None
if self.greedy(w.chunk, constraint) is False: # User-defined.
return None
w0, w1 = w01
# Update map for optional chunk words (see below).
words = sentence.words[w0.index:w1.index+1]
for w in words:
if w.index not in map and w.chunk:
wx = find(lambda w: w.index in map, reversed(w.chunk.words))
if wx:
map[w.index] = map[wx.index]
# Return matched word range, we'll need the map to build Match.constituents().
return Match(self, words, map)
return None
# RECURSION
constraint = sequence[i]
for w in sentence.words[start:]:
#print " "*d, "match?", w, sequence[i].string # DEBUG
if i < len(sequence) and constraint.match(w):
#print " "*d, "match!", w, sequence[i].string # DEBUG
map[w.index] = constraint
if constraint.multiple:
# Next word vs. same constraint if Constraint.multiple=True.
m = self._match(sequence, sentence, w.index+1, i, w0 or w, map, d+1)
if m: return m
# Next word vs. next constraint.
m = self._match(sequence, sentence, w.index+1, i+1, w0 or w, map, d+1)
if m: return m
# Chunk words other than the head are optional:
# - Pattern.fromstring("cat") matches "cat" but also "the big cat" (overspecification).
# - Pattern.fromstring("cat|NN") does not match "the big cat" (explicit POS-tag).
if w0 is not None and len(constraint.tags) == 0:
if constraint.exclude is None or len(constraint.exclude.tags) == 0:
if self.strict is False and w.chunk is not None and w.chunk.head != w:
continue
break
# Part-of-speech tags match one single word.
if w0 is not None and len(constraint.tags) > 0:
break
if w0 is not None and constraint.exclude and len(constraint.exclude.tags) > 0:
break
@property
def string(self):
return " ".join(constraint.string for constraint in self.sequence)
_cache = {}
_CACHE_SIZE = 100 # Number of dynamic Pattern objects to keep in cache.
def compile(pattern, *args, **kwargs):
""" Returns a Pattern from the given string or regular expression.
Recently compiled patterns are kept in cache.
"""
id, p = repr(pattern)+repr(args), None
if id in _cache:
return _cache[id]
if isinstance(pattern, basestring):
p = Pattern.fromstring(pattern, *args, **kwargs)
if isinstance(pattern, regexp):
p = Pattern([Constraint(words=[pattern], taxonomy=kwargs.get("taxonomy", TAXONOMY))], *args, **kwargs)
if len(_cache) > _CACHE_SIZE:
_cache.clear()
if isinstance(p, Pattern):
_cache[id] = p
return p
else:
raise TypeError, "can't compile '%s' object" % pattern.__class__.__name__
def match(pattern, sentence, *args, **kwargs):
""" Returns the first match found in the given sentence, or None.
"""
return compile(pattern, *args, **kwargs).match(sentence)
def search(pattern, sentence, *args, **kwargs):
""" Returns a list of all matches found in the given sentence.
"""
return compile(pattern, *args, **kwargs).search(sentence)
def escape(string):
""" Returns the string with control characters for Pattern syntax escaped.
For example: "hello!" => "hello\!".
"""
for ch in ("{","}","[","]","(",")","_","|","!","*","+","^"):
string = string.replace(ch, "\\"+ch)
return string
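# Illustrative usage with the lightweight Sentence defined at the top of this
# module (not executed here; the tagged parser in pattern.en gives richer input):
# matches = search("white rabbit", Sentence("the big white rabbit"))
# matches[0].string => "white rabbit"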
#--- PATTERN MATCH ---------------------------------------------------------------------------------
class Match:
def __init__(self, pattern, words=[], map={}):
""" Search result returned from Pattern.match(sentence),
containing a sequence of Word objects.
"""
self.pattern = pattern
self.words = words
self._map1 = dict() # Word index to Constraint.
self._map2 = dict() # Constraint index to list of Word indices.
for w in self.words:
self._map1[w.index] = map[w.index]
for k,v in self._map1.items():
self._map2.setdefault(self.pattern.sequence.index(v),[]).append(k)
for k,v in self._map2.items():
v.sort()
def __len__(self):
return len(self.words)
def __iter__(self):
return iter(self.words)
def __getitem__(self, i):
return self.words.__getitem__(i)
@property
def start(self):
return self.words and self.words[0].index or None
@property
def stop(self):
return self.words and self.words[-1].index+1 or None
def constraint(self, word):
""" Returns the constraint that matches the given Word, or None.
"""
if word.index in self._map1:
return self._map1[word.index]
def constraints(self, chunk):
""" Returns a list of constraints that match the given Chunk.
"""
a = [self._map1[w.index] for w in chunk.words if w.index in self._map1]
b = []; [b.append(constraint) for constraint in a if constraint not in b]
return b
def constituents(self, constraint=None):
""" Returns a list of Word and Chunk objects,
where words have been grouped into their chunks whenever possible.
Optionally, returns only chunks/words that match given constraint(s), or constraint index.
"""
# Select only words that match the given constraint.
# Note: this will only work with constraints from Match.pattern.sequence.
W = self.words
n = len(self.pattern.sequence)
if isinstance(constraint, (int, Constraint)):
if isinstance(constraint, int):
i = constraint
i = i<0 and i%n or i
else:
i = self.pattern.sequence.index(constraint)
W = self._map2.get(i,[])
W = [self.words[i-self.words[0].index] for i in W]
if isinstance(constraint, (list, tuple)):
W = []; [W.extend(self._map2.get(j<0 and j%n or j,[])) for j in constraint]
W = [self.words[i-self.words[0].index] for i in W]
W = unique(W)
a = []
i = 0
while i < len(W):
w = W[i]
if w.chunk and W[i:i+len(w.chunk)] == w.chunk.words:
i += len(w.chunk) - 1
a.append(w.chunk)
else:
a.append(w)
i += 1
return a
def group(self, index, chunked=False):
""" Returns a list of Word objects that match the given group.
With chunked=True, returns a list of Word + Chunk objects - see Match.constituents().
A group consists of consecutive constraints wrapped in { }, e.g.,
search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black.
"""
if index < 0 or index > len(self.pattern.groups):
raise IndexError, "no such group"
if index > 0 and index <= len(self.pattern.groups):
g = self.pattern.groups[index-1]
if index == 0:
g = self.pattern.sequence
if chunked is True:
return Group(self, self.constituents(constraint=[self.pattern.sequence.index(x) for x in g]))
return Group(self, [w for w in self.words if self.constraint(w) in g])
@property
def string(self):
return " ".join(w.string for w in self.words)
def __repr__(self):
return "Match(words=%s)" % repr(self.words)
#--- PATTERN MATCH GROUP ---------------------------------------------------------------------------
class Group(list):
def __init__(self, match, words):
list.__init__(self, words)
self.match = match
@property
def words(self):
return list(self)
@property
def start(self):
return self and self[0].index or None
@property
def stop(self):
return self and self[-1].index+1 or None
@property
def string(self):
return " ".join(w.string for w in self)
#from en import Sentence, parse
#s = Sentence(parse("I was looking at the big cat, and the big cat was staring back", lemmata=True))
#p = Pattern.fromstring("(*_look*|)+ (at|)+ (DT|)+ (*big_cat*)+")
#
#def profile():
# import time
# t = time.time()
# for i in range(100):
# p.search(s)
# print time.time()-t
#
#import cProfile
##import pstats
#cProfile.run("profile()", "_profile")
#p = pstats.Stats("_profile")
#p.stream = open("_profile", "w")
#p.sort_stats("time").print_stats(30)
#p.stream.close()
#s = open("_profile").read()
#print s
| apache-2.0 | -573,557,773,068,196,400 | 40.967936 | 150 | 0.528603 | false |
hanx11/shadowsocks | shadowsocks/crypto/rc4_md5.py | 1042 | 1339 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
from shadowsocks.crypto import openssl
__all__ = ['ciphers']
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
i=1, padding=1):
md5 = hashlib.md5()
md5.update(key)
md5.update(iv)
rc4_key = md5.digest()
return openssl.OpenSSLCrypto(b'rc4', rc4_key, b'', op)
ciphers = {
'rc4-md5': (16, 16, create_cipher),
}
def test():
from shadowsocks.crypto import util
cipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 1)
decipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test()
| apache-2.0 | -5,927,778,082,856,169,000 | 25.254902 | 75 | 0.671397 | false |
mrquim/repository.mrquim | script.video.F4mProxy/lib/f4mUtils/codec.py | 88 | 2613 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Classes for reading/writing binary data (such as TLS records)."""
from .compat import *
class Writer(object):
def __init__(self):
self.bytes = bytearray(0)
def add(self, x, length):
self.bytes += bytearray(length)
newIndex = len(self.bytes) - 1
for count in range(length):
self.bytes[newIndex] = x & 0xFF
x >>= 8
newIndex -= 1
def addFixSeq(self, seq, length):
for e in seq:
self.add(e, length)
def addVarSeq(self, seq, length, lengthLength):
self.add(len(seq)*length, lengthLength)
for e in seq:
self.add(e, length)
class Parser(object):
def __init__(self, bytes):
self.bytes = bytes
self.index = 0
def get(self, length):
if self.index + length > len(self.bytes):
raise SyntaxError()
x = 0
for count in range(length):
x <<= 8
x |= self.bytes[self.index]
self.index += 1
return x
def getFixBytes(self, lengthBytes):
if self.index + lengthBytes > len(self.bytes):
raise SyntaxError()
bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes
return bytes
def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList):
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength)
if lengthList % length != 0:
raise SyntaxError()
lengthList = lengthList // length
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index
def setLengthCheck(self, length):
self.lengthCheck = length
self.indexCheck = self.index
def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck:
raise SyntaxError()
def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck:
return False
elif (self.index - self.indexCheck) == self.lengthCheck:
return True
else:
raise SyntaxError()
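# Illustrative round trip (not executed here):
# w = Writer()
# w.add(258, 2)        # 16-bit big-endian value -> bytearray of [0x01, 0x02]
# p = Parser(w.bytes)
# p.get(2)             # => 258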
| gpl-2.0 | 3,949,432,043,730,541,000 | 28.359551 | 72 | 0.581707 | false |
alphafoobar/intellij-community | python/lib/Lib/site-packages/django/contrib/auth/backends.py | 230 | 4582 | from django.db import connection
from django.contrib.auth.models import User, Permission
class ModelBackend(object):
"""
Authenticates against django.contrib.auth.models.User.
"""
supports_object_permissions = False
supports_anonymous_user = True
supports_inactive_user = True
# TODO: Model, login attribute name and password attribute name should be
# configurable.
def authenticate(self, username=None, password=None):
try:
user = User.objects.get(username=username)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
def get_group_permissions(self, user_obj):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if not hasattr(user_obj, '_group_perm_cache'):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = Permission.objects.filter(group__user=user_obj)
perms = perms.values_list('content_type__app_label', 'codename').order_by()
user_obj._group_perm_cache = set(["%s.%s" % (ct, name) for ct, name in perms])
return user_obj._group_perm_cache
def get_all_permissions(self, user_obj):
if user_obj.is_anonymous():
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set([u"%s.%s" % (p.content_type.app_label, p.codename) for p in user_obj.user_permissions.select_related()])
user_obj._perm_cache.update(self.get_group_permissions(user_obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
"""
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
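# Illustrative permission check (not executed here; "polls.can_vote" is a
# made-up permission and `user` a placeholder User instance). Permission
# strings take the form "<app_label>.<codename>", e.g.:
# ModelBackend().has_perm(user, "polls.can_vote")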
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
user = None
username = self.clean_username(remote_user)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = User.objects.get_or_create(username=username)
if created:
user = self.configure_user(user)
else:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
return user
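# Illustrative subclass (not executed here): strips a Windows-style domain
# prefix from REMOTE_USER and refuses to auto-create unknown accounts.
#
# class DomainRemoteUserBackend(RemoteUserBackend):
#     create_unknown_user = False
#
#     def clean_username(self, username):
#         return username.rsplit("\\", 1)[-1]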
| apache-2.0 | -5,614,252,204,261,928,000 | 35.365079 | 143 | 0.61567 | false |
hilaskis/UAV_MissionPlanner | Lib/xml/sax/xmlreader.py | 113 | 12632 | """An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
import handler
from _exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
"""Interface for reading an XML document using callbacks.
XMLReader is the interface that an XML parser's SAX2 driver must
implement. This interface allows an application to set and query
features and properties in the parser, to register event handlers
for document processing, and to initiate a document parse.
All SAX interfaces are assumed to be synchronous: the parse
methods must not return until parsing is complete, and readers
must wait for an event-handler callback to return before reporting
the next event."""
def __init__(self):
self._cont_handler = handler.ContentHandler()
self._dtd_handler = handler.DTDHandler()
self._ent_handler = handler.EntityResolver()
self._err_handler = handler.ErrorHandler()
def parse(self, source):
"Parse an XML document from a system identifier or an InputSource."
raise NotImplementedError("This method must be implemented!")
def getContentHandler(self):
"Returns the current ContentHandler."
return self._cont_handler
def setContentHandler(self, handler):
"Registers a new object to receive document content events."
self._cont_handler = handler
def getDTDHandler(self):
"Returns the current DTD handler."
return self._dtd_handler
def setDTDHandler(self, handler):
"Register an object to receive basic DTD-related events."
self._dtd_handler = handler
def getEntityResolver(self):
"Returns the current EntityResolver."
return self._ent_handler
def setEntityResolver(self, resolver):
"Register an object to resolve external entities."
self._ent_handler = resolver
def getErrorHandler(self):
"Returns the current ErrorHandler."
return self._err_handler
def setErrorHandler(self, handler):
"Register an object to receive error-message events."
self._err_handler = handler
def setLocale(self, locale):
"""Allow an application to set the locale for errors and warnings.
SAX parsers are not required to provide localization for errors
and warnings; if they cannot support the requested locale,
however, they must throw a SAX exception. Applications may
request a locale change in the middle of a parse."""
raise SAXNotSupportedException("Locale support not implemented")
def getFeature(self, name):
"Looks up and returns the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
"Sets the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def getProperty(self, name):
"Looks up and returns the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
"Sets the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
"""This interface adds three extra methods to the XMLReader
interface that allow XML parsers to support incremental
parsing. Support for this interface is optional, since not all
underlying XML parsers support this functionality.
When the parser is instantiated it is ready to begin accepting
data from the feed method immediately. After parsing has been
finished with a call to close the reset method must be called to
make the parser ready to accept new data, either from feed or
using the parse method.
Note that these methods must _not_ be called during parsing, that
is, after parse has been called and before it returns.
By default, the class also implements the parse method of the XMLReader
interface using the feed, close and reset methods of the
IncrementalParser interface as a convenience to SAX 2.0 driver
writers."""
def __init__(self, bufsize=2**16):
self._bufsize = bufsize
XMLReader.__init__(self)
def parse(self, source):
import saxutils
source = saxutils.prepare_input_source(source)
self.prepareParser(source)
file = source.getByteStream()
buffer = file.read(self._bufsize)
while buffer != "":
self.feed(buffer)
buffer = file.read(self._bufsize)
self.close()
def feed(self, data):
"""This method gives the raw XML data in the data parameter to
the parser and makes it parse the data, emitting the
corresponding events. It is allowed for XML constructs to be
split across several calls to feed.
feed may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def prepareParser(self, source):
"""This method is called by the parse implementation to allow
the SAX 2.0 driver to prepare itself for parsing."""
raise NotImplementedError("prepareParser must be overridden!")
def close(self):
"""This method is called when the entire XML document has been
passed to the parser through the feed method, to notify the
parser that there are no more data. This allows the parser to
do the final checks on the document and empty the internal
data buffer.
The parser will not be ready to parse another document until
the reset method has been called.
close may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def reset(self):
"""This method is called after close has been called to reset
the parser so that it is ready to parse new documents. The
results of calling parse or feed after close without calling
reset are undefined."""
raise NotImplementedError("This method must be implemented!")
# ===== LOCATOR =====
class Locator:
"""Interface for associating a SAX event with a document
location. A locator object will return valid results only during
calls to DocumentHandler methods; at any other time, the
results are unpredictable."""
def getColumnNumber(self):
"Return the column number where the current event ends."
return -1
def getLineNumber(self):
"Return the line number where the current event ends."
return -1
def getPublicId(self):
"Return the public identifier for the current event."
return None
def getSystemId(self):
"Return the system identifier for the current event."
return None
# ===== INPUTSOURCE =====
class InputSource:
"""Encapsulation of the information needed by the XMLReader to
read entities.
This class may include information about the public identifier,
system identifier, byte stream (possibly with character encoding
information) and/or the character stream of an entity.
Applications will create objects of this class for use in the
XMLReader.parse method and for returning from
EntityResolver.resolveEntity.
An InputSource belongs to the application, the XMLReader is not
allowed to modify InputSource objects passed to it from the
application, although it may make copies and modify those."""
def __init__(self, system_id = None):
self.__system_id = system_id
self.__public_id = None
self.__encoding = None
self.__bytefile = None
self.__charfile = None
def setPublicId(self, public_id):
"Sets the public identifier of this InputSource."
self.__public_id = public_id
def getPublicId(self):
"Returns the public identifier of this InputSource."
return self.__public_id
def setSystemId(self, system_id):
"Sets the system identifier of this InputSource."
self.__system_id = system_id
def getSystemId(self):
"Returns the system identifier of this InputSource."
return self.__system_id
def setEncoding(self, encoding):
"""Sets the character encoding of this InputSource.
The encoding must be a string acceptable for an XML encoding
declaration (see section 4.3.3 of the XML recommendation).
The encoding attribute of the InputSource is ignored if the
InputSource also contains a character stream."""
self.__encoding = encoding
def getEncoding(self):
"Get the character encoding of this InputSource."
return self.__encoding
def setByteStream(self, bytefile):
"""Set the byte stream (a Python file-like object which does
not perform byte-to-character conversion) for this input
source.
The SAX parser will ignore this if there is also a character
stream specified, but it will use a byte stream in preference
to opening a URI connection itself.
If the application knows the character encoding of the byte
stream, it should set it with the setEncoding method."""
self.__bytefile = bytefile
def getByteStream(self):
"""Get the byte stream for this input source.
The getEncoding method will return the character encoding for
this byte stream, or None if unknown."""
return self.__bytefile
def setCharacterStream(self, charfile):
"""Set the character stream for this input source. (The stream
must be a Python 2.0 Unicode-wrapped file-like that performs
conversion to Unicode strings.)
If there is a character stream specified, the SAX parser will
ignore any byte stream and will not attempt to open a URI
connection to the system identifier."""
self.__charfile = charfile
def getCharacterStream(self):
"Get the character stream for this input source."
return self.__charfile
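# Illustrative sketch (an assumption, not part of the original module): how an
# application typically fills in an InputSource before passing it to
# XMLReader.parse or returning it from EntityResolver.resolveEntity.
def _example_input_source(system_id, byte_stream, encoding="utf-8"):
    source = InputSource(system_id)
    source.setEncoding(encoding)       # ignored if a character stream is set
    source.setByteStream(byte_stream)  # e.g. an open binary file object
    return source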
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
def __init__(self, attrs):
"""Non-NS-aware implementation.
attrs should be of the form {name : value}."""
self._attrs = attrs
def getLength(self):
return len(self._attrs)
def getType(self, name):
return "CDATA"
def getValue(self, name):
return self._attrs[name]
def getValueByQName(self, name):
return self._attrs[name]
def getNameByQName(self, name):
if not name in self._attrs:
raise KeyError, name
return name
def getQNameByName(self, name):
if not name in self._attrs:
raise KeyError, name
return name
def getNames(self):
return self._attrs.keys()
def getQNames(self):
return self._attrs.keys()
def __len__(self):
return len(self._attrs)
def __getitem__(self, name):
return self._attrs[name]
def keys(self):
return self._attrs.keys()
def has_key(self, name):
return name in self._attrs
def __contains__(self, name):
return name in self._attrs
def get(self, name, alternative=None):
return self._attrs.get(name, alternative)
def copy(self):
return self.__class__(self._attrs)
def items(self):
return self._attrs.items()
def values(self):
return self._attrs.values()
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
def __init__(self, attrs, qnames):
"""NS-aware implementation.
attrs should be of the form {(ns_uri, lname): value, ...}.
qnames of the form {(ns_uri, lname): qname, ...}."""
self._attrs = attrs
self._qnames = qnames
def getValueByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return self._attrs[nsname]
raise KeyError, name
def getNameByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return nsname
raise KeyError, name
def getQNameByName(self, name):
return self._qnames[name]
def getQNames(self):
return self._qnames.values()
def copy(self):
return self.__class__(self._attrs, self._qnames)
def _test():
XMLReader()
IncrementalParser()
Locator()
if __name__ == "__main__":
_test()
| gpl-2.0 | 724,130,147,819,564,400 | 32.154856 | 78 | 0.661574 | false |
codepython/CollectorCity-Market-Place | stores/apps/sell/migrations/0004_auto__add_field_sell_cancel.py | 2 | 11769 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Sell.cancel'
db.add_column('sell_sell', 'cancel', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Sell.cancel'
db.delete_column('sell_sell', 'cancel')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'sell.cart': {
'Meta': {'object_name': 'Cart'},
'bidder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shippingdata': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.ShippingData']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"})
},
'sell.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Cart']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
'qty': ('django.db.models.fields.IntegerField', [], {})
},
'sell.payment': {
'Meta': {'object_name': 'Payment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sell': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.Sell']", 'unique': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'state_actual': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'payment_history'", 'unique': 'True', 'null': 'True', 'to': "orm['sell.PaymentHistory']"}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '11', 'decimal_places': '2'})
},
'sell.paymenthistory': {
'Meta': {'object_name': 'PaymentHistory'},
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Payment']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'sell.sell': {
'Meta': {'object_name': 'Sell'},
'bidder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'cancel': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "'Manual Payment'", 'max_length': '255'}),
'shippingdata': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.ShippingData']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']", 'null': 'True'}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'}),
'total_taxes': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'}),
'total_without_taxes': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'})
},
'sell.sellitem': {
'Meta': {'object_name': 'SellItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
'qty': ('django.db.models.fields.IntegerField', [], {}),
'sell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Sell']"})
},
'sell.shipping': {
'Meta': {'object_name': 'Shipping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sell': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sell.Sell']", 'unique': 'True'}),
'shipping_service': ('django.db.models.fields.CharField', [], {'default': "'Other'", 'max_length': '255'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'state_actual': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'shipping_history'", 'unique': 'True', 'null': 'True', 'to': "orm['sell.ShippingHistory']"}),
'tracking_number': ('django.db.models.fields.CharField', [], {'default': "'--'", 'max_length': '255'})
},
'sell.shippingdata': {
'Meta': {'object_name': 'ShippingData'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'sell.shippinghistory': {
'Meta': {'object_name': 'ShippingHistory'},
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shipping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sell.Shipping']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'shops.shop': {
'Meta': {'object_name': 'Shop'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['sell']
| apache-2.0 | -3,764,220,601,543,705,600 | 73.018868 | 193 | 0.541847 | false |
eusi/MissionPlanerHM | Lib/site-packages/numpy/distutils/from_template.py | 51 | 7890 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
  The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma separated list of arbitrary
  expressions. If these expressions must contain a comma|leftarrow|rightarrow,
then prepend the comma|leftarrow|rightarrow with a backslash.
If an expression matches '\\<index>' then it will be replaced
by <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
__all__ = ['process_str','process_file']
import os
import sys
import re
routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b',re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)',re.I)
function_start_re = re.compile(r'\n     (\$|\*)\s*function\b',re.I)
def parse_structure(astr):
""" Return a list of tuples for each function or subroutine each
tuple is the start and end of a subroutine or function to be
expanded.
"""
spanlist = []
ind = 0
while 1:
m = routine_start_re.search(astr,ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr,start,m.end()):
while 1:
i = astr.rfind('\n',ind,start)
if i==-1:
break
start = i
                if astr[i:i+7]!='\n     $':
break
start += 1
m = routine_end_re.search(astr,m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start,end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace('\,','@comma@')
thelist = conv(repl)
names[name] = thelist
return names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
b = astr.split(',')
l = [x.strip() for x in b]
for i in range(len(l)):
m = item_re.match(l[i])
if m:
j = int(m.group('index'))
l[i] = l[j]
return ','.join(l)
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = adict.keys()
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr,names):
substr = substr.replace('\>','@rightarrow@')
substr = substr.replace('\<','@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>",substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace('\,','@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r,names.get(r,None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@',',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"\
" for <%s=%s>. Ignoring." % (base_rule,
','.join(rules[base_rule]),
r,thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name,(k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@','>')
newstr = newstr.replace('@leftarrow@','<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = '' #_head # using _head will break free-format files
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
writestr += newstr[oldend:sub[0]]
names.update(find_repl_patterns(newstr[oldend:sub[0]]))
writestr += expand_sub(newstr[sub[0]:sub[1]],names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]",re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid.readlines():
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d,fn)
if os.path.isfile(fn):
print ('Including file',fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
if __name__ == "__main__":
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file,'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname,'w')
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
| gpl-3.0 | -2,672,470,971,787,896,300 | 29.820313 | 100 | 0.560963 | false |
av8ramit/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/softplus.py | 81 | 5398 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Softplus bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"Softplus",
]
class Softplus(bijector.Bijector):
"""Bijector which computes `Y = g(X) = Log[1 + exp(X)]`.
The softplus `Bijector` has the following two useful properties:
* The domain is the positive real numbers
* `softplus(x) approx x`, for large `x`, so it does not overflow as easily as
the `Exp` `Bijector`.
The optional nonzero `hinge_softness` parameter changes the transition at
zero. With `hinge_softness = c`, the bijector is:
```f_c(x) := c * g(x / c) = c * Log[1 + exp(x / c)].```
For large `x >> 1`, `c * Log[1 + exp(x / c)] approx c * Log[exp(x / c)] = x`,
so the behavior for large `x` is the same as the standard softplus.
As `c > 0` approaches 0 from the right, `f_c(x)` becomes less and less soft,
approaching `max(0, x)`.
* `c = 1` is the default.
* `c > 0` but small means `f(x) approx ReLu(x) = max(0, x)`.
* `c < 0` flips sign and reflects around the `y-axis`: `f_{-c}(x) = -f_c(-x)`.
* `c = 0` results in a non-bijective transformation and triggers an exception.
Example Use:
```python
# Create the Y=g(X)=softplus(X) transform which works only on Tensors with 1
# batch ndim and 2 event ndims (i.e., vector of matrices).
softplus = Softplus(event_ndims=2)
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
log(1 + exp(x)) == softplus.forward(x)
log(exp(x) - 1) == softplus.inverse(x)
```
Note: log(.) and exp(.) are applied element-wise but the Jacobian is a
reduction over the event space.
"""
@distribution_util.AppendDocstring(
kwargs_dict={
"hinge_softness": (
"Nonzero floating point `Tensor`. Controls the softness of what "
"would otherwise be a kink at the origin. Default is 1.0")})
def __init__(self,
event_ndims=0,
hinge_softness=None,
validate_args=False,
name="softplus"):
with ops.name_scope(name, values=[hinge_softness]):
if hinge_softness is not None:
self._hinge_softness = ops.convert_to_tensor(
hinge_softness, name="hinge_softness")
else:
self._hinge_softness = None
if validate_args:
nonzero_check = check_ops.assert_none_equal(
ops.convert_to_tensor(
0, dtype=self.hinge_softness.dtype),
self.hinge_softness,
message="hinge_softness must be non-zero")
self._hinge_softness = control_flow_ops.with_dependencies(
[nonzero_check], self.hinge_softness)
super(Softplus, self).__init__(
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward(self, x):
if self.hinge_softness is None:
return nn_ops.softplus(x)
hinge_softness = math_ops.cast(self.hinge_softness, x.dtype)
return hinge_softness * nn_ops.softplus(x / hinge_softness)
def _inverse(self, y):
if self.hinge_softness is None:
return distribution_util.softplus_inverse(y)
hinge_softness = math_ops.cast(self.hinge_softness, y.dtype)
return hinge_softness * distribution_util.softplus_inverse(
y / hinge_softness)
def _inverse_log_det_jacobian(self, y):
# Could also do:
# ildj = math_ops.reduce_sum(y - distribution_util.softplus_inverse(y),
# axis=event_dims)
# but the following is more numerically stable. Ie,
# Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
# ==> dX/dY = exp{Y} / (exp{Y} - 1)
# = 1 / (1 - exp{-Y}),
# which is the most stable for large Y > 0. For small Y, we use
# 1 - exp{-Y} approx Y.
if self.hinge_softness is not None:
y /= math_ops.cast(self.hinge_softness, y.dtype)
return -math_ops.reduce_sum(math_ops.log(-math_ops.expm1(-y)),
axis=self._event_dims_tensor(y))
def _forward_log_det_jacobian(self, x):
if self.hinge_softness is not None:
x /= math_ops.cast(self.hinge_softness, x.dtype)
return -math_ops.reduce_sum(nn_ops.softplus(-x),
axis=self._event_dims_tensor(x))
@property
def hinge_softness(self):
return self._hinge_softness
| apache-2.0 | 2,437,748,543,157,167,600 | 36.486111 | 80 | 0.62375 | false |
mith1979/ansible_automation | applied_python/applied_python/lib64/python2.7/site-packages/Crypto/Hash/MD4.py | 124 | 2716 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""MD4 cryptographic hash algorithm.
MD4 is specified in RFC1320_ and produces the 128 bit digest of a message.
>>> from Crypto.Hash import MD4
>>>
>>> h = MD4.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
MD4 stands for Message Digest version 4, and it was invented by Rivest in 1990.
This algorithm is insecure. Do not use it for new designs.
.. _RFC1320: http://tools.ietf.org/html/rfc1320
"""
__revision__ = "$Id$"
__all__ = ['new', 'digest_size', 'MD4Hash' ]
from Crypto.Util.py3compat import *
from Crypto.Hash.hashalgo import HashAlgo
import Crypto.Hash._MD4 as _MD4
hashFactory = _MD4
class MD4Hash(HashAlgo):
"""Class that implements an MD4 hash
:undocumented: block_size
"""
#: ASN.1 Object identifier (OID)::
#:
    #:  id-md4      OBJECT IDENTIFIER ::= {
#: iso(1) member-body(2) us(840) rsadsi(113549)
#: digestAlgorithm(2) 4
#: }
#:
#: This value uniquely identifies the MD4 algorithm.
oid = b('\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x04')
digest_size = 16
block_size = 64
def __init__(self, data=None):
HashAlgo.__init__(self, hashFactory, data)
def new(self, data=None):
return MD4Hash(data)
def new(data=None):
"""Return a fresh instance of the hash object.
:Parameters:
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `MD4Hash.update()`.
Optional.
:Return: A `MD4Hash` object
"""
return MD4Hash().new(data)
#: The size of the resulting hash in bytes.
digest_size = MD4Hash.digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = MD4Hash.block_size
| apache-2.0 | -602,851,722,341,652,100 | 28.846154 | 78 | 0.643225 | false |
40223125/w17t2 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/event.py | 603 | 19086 | #!/usr/bin/env python
'''Pygame module for interacting with events and queues.
Pygame handles all its event messaging through an event queue. The routines
in this module help you manage that event queue. The input queue is heavily
dependent on the pygame display module. If the display has not been
initialized and a video mode not set, the event queue will not really work.
The queue is a regular queue of Event objects; there are a variety of ways
to access the events it contains, from simply checking for the existence of
events to grabbing them directly off the stack.
All events have a type identifier. This event type is in between the values
of NOEVENT and NUMEVENTS. All user defined events can have the value of
USEREVENT or higher. It is recommended to make sure your event ids follow this
system.
To get the state of various input devices, you can forego the event queue
and access the input devices directly with their appropriate modules; mouse,
key, and joystick. If you use this method, remember that pygame requires some
form of communication with the system window manager and other parts of the
platform. To keep pygame in synch with the system, you will need to call
pygame.event.pump() to keep everything current. You'll want to call this
function usually once per game loop.
The event queue offers some simple filtering. This can help performance
slightly by blocking certain event types from the queue; use the
pygame.event.set_allowed() and pygame.event.set_blocked() to work with
this filtering. All events default to allowed.
Joysticks will not send any events until the device has been initialized.
An Event object contains an event type and a readonly set of member data.
The Event object contains no method functions, just member data. Event
objects are retrieved from the pygame event queue. You can create your
own new events with the pygame.event.Event() function.
Your program must take steps to keep the event queue from overflowing. If the
program is not clearing or getting all events off the queue at regular
intervals, it can overflow. When the queue overflows an exception is thrown.
All Event objects contain an event type identifier in the Event.type member.
You may also get full access to the Event's member data through the Event.dict
attribute. All other member lookups will be passed through to the Event's
dictionary values.
While debugging and experimenting, you can print the Event objects for a
quick display of its type and members. Events that come from the system
will have a guaranteed set of member items based on the type. Here is a
list of the Event members that are defined with each type.
QUIT
(none)
ACTIVEEVENT
gain, state
KEYDOWN
unicode, key, mod
KEYUP
key, mod
MOUSEMOTION
pos, rel, buttons
MOUSEBUTTONUP
pos, button
MOUSEBUTTONDOWN
pos, button
JOYAXISMOTION
joy, axis, value
JOYBALLMOTION
joy, ball, rel
JOYHATMOTION
joy, hat, value
JOYBUTTONUP
joy, button
JOYBUTTONDOWN
joy, button
VIDEORESIZE
size, w, h
VIDEOEXPOSE
(none)
USEREVENT
code
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from copy import copy
#from ctypes import * #brython
from SDL import *
import pygame.base
import pygame.locals
import pygame.display
def pump():
'''Internally process pygame event handlers.
For each frame of your game, you will need to make some sort of call to
the event queue. This ensures your program can internally interact with
the rest of the operating system. If you are not using other event
functions in your game, you should call pygame.event.pump() to allow
pygame to handle internal actions.
This function is not necessary if your program is consistently processing
events on the queue through the other pygame.event functions.
There are important things that must be dealt with internally in the event
queue. The main window may need to be repainted or respond to the system.
If you fail to make a call to the event queue for too long, the system may
decide your program has locked up.
'''
pygame.display._video_init_check()
SDL_PumpEvents()
def get(typelist=None):
'''Get events from the queue.
pygame.event.get(): return Eventlist
pygame.event.get(type): return Eventlist
pygame.event.get(typelist): return Eventlist
This will get all the messages and remove them from the queue. If a type
or sequence of types is given only those messages will be removed from the
queue.
If you are only taking specific events from the queue, be aware that the
    queue could eventually fill up with the events you are not interested in.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types that can be returned.
:rtype: list of `Event`
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
events.append(Event(0, sdl_event=new_events[0]))
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
return events
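# Illustrative sketch (an assumption, not part of the original module): the
# once-per-frame loop the module docstring describes, draining the queue with
# get().  It assumes the display has been initialised and a video mode set.
def _example_event_loop(handle_key):
    import pygame.locals
    running = True
    while running:
        for ev in get():                      # drains and handles the queue
            if ev.type == pygame.locals.QUIT:
                running = False
            elif ev.type == pygame.locals.KEYDOWN:
                handle_key(ev.key)
        # ... draw a frame here; get() already pumps the queue internally.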
def poll():
'''Get a single event from the queue.
Returns a single event from the queue. If the event queue is empty an event
of type pygame.NOEVENT will be returned immediately. The returned event is
removed from the queue.
:rtype: Event
'''
pygame.display._video_init_check()
event = SDL_PollEventAndReturn()
if event:
return Event(0, sdl_event=event, keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT)
def wait():
'''Wait for a single event from the queue.
Returns a single event from the queue. If the queue is empty this function
will wait until one is created. While the program is waiting it will sleep
in an idle state. This is important for programs that want to share the
system with other applications.
:rtype: Event
'''
pygame.display._video_init_check()
return Event(0, sdl_event=SDL_WaitEventAndReturn())
def peek(typelist=None):
'''Test if event types are waiting on the queue.
Returns true if there are any events of the given type waiting on the
queue. If a sequence of event types is passed, this will return True if
any of those events are on the queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to look for.
:rtype: bool
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = SDL_EVENTMASK(int(typelist))
SDL_PumpEvents()
events = SDL_PeepEvents(1, SDL_PEEKEVENT, mask)
if typelist is None:
if events:
return Event(0, sdl_event=events[0], keep_userdata=True)
else:
return Event(pygame.locals.NOEVENT) # XXX deviation from pygame
return len(events) > 0
def clear(typelist=None):
'''Remove all events from the queue.
Remove all events or events of a specific type from the queue. This has the
same effect as `get` except nothing is returned. This can be slightly more
effecient when clearing a full event queue.
:Parameters:
`typelist` : int or sequence of int
Event type or list of event types to remove.
'''
pygame.display._video_init_check()
if typelist is None:
mask = SDL_ALLEVENTS
else:
if hasattr(typelist, '__len__'):
mask = reduce(lambda a,b: a | SDL_EVENTMASK(b), typelist, 0)
else:
mask = int(typelist)
SDL_PumpEvents()
events = []
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
while new_events:
new_events = SDL_PeepEvents(1, SDL_GETEVENT, mask)
_event_names = {
SDL_ACTIVEEVENT: 'ActiveEvent',
SDL_KEYDOWN: 'KeyDown',
SDL_KEYUP: 'KeyUp',
SDL_MOUSEMOTION: 'MouseMotion',
SDL_MOUSEBUTTONDOWN:'MouseButtonDown',
SDL_MOUSEBUTTONUP: 'MouseButtonUp',
SDL_JOYAXISMOTION: 'JoyAxisMotion',
SDL_JOYBALLMOTION: 'JoyBallMotion',
SDL_JOYHATMOTION: 'JoyHatMotion',
SDL_JOYBUTTONUP: 'JoyButtonUp',
SDL_JOYBUTTONDOWN: 'JoyButtonDown',
SDL_QUIT: 'Quit',
SDL_SYSWMEVENT: 'SysWMEvent',
SDL_VIDEORESIZE: 'VideoResize',
SDL_VIDEOEXPOSE: 'VideoExpose',
SDL_NOEVENT: 'NoEvent'
}
def event_name(event_type):
'''Get the string name from an event id.
Pygame uses integer ids to represent the event types. If you want to
report these types to the user they should be converted to strings. This
    will return the simple name for an event type. The string is in the
CamelCase style.
:Parameters:
- `event_type`: int
:rtype: str
'''
if event_type >= SDL_USEREVENT and event_type < SDL_NUMEVENTS:
return 'UserEvent'
return _event_names.get(event_type, 'Unknown')
def set_blocked(typelist):
'''Control which events are allowed on the queue.
The given event types are not allowed to appear on the event queue. By
default all events can be placed on the queue. It is safe to disable an
event type multiple times.
If None is passed as the argument, this has the opposite effect and none of
the event types are allowed to be placed on the queue.
:note: events posted with `post` will not be blocked.
:Parameters:
`typelist` : int or sequence of int or None
Event type or list of event types to disallow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_IGNORE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_IGNORE)
else:
SDL_EventState(typelist, SDL_IGNORE)
def set_allowed(typelist):
'''Control which events are allowed on the queue.
The given event types are allowed to appear on the event queue. By default
all events can be placed on the queue. It is safe to enable an event type
multiple times.
If None is passed as the argument, this has the opposite effect and all of
the event types are allowed to be placed on the queue.
:Parameters:
`typelist` : int or sequence of int or None
            Event type or list of event types to allow.
'''
pygame.display._video_init_check()
if typelist is None:
SDL_EventState(SDL_ALLEVENTS, SDL_ENABLE)
elif hasattr(typelist, '__len__'):
for val in typelist:
SDL_EventState(val, SDL_ENABLE)
else:
SDL_EventState(typelist, SDL_ENABLE)
def get_blocked(typelist):
'''Test if a type of event is blocked from the queue.
Returns true if the given event type is blocked from the queue.
:Parameters:
- `event_type`: int
:rtype: int
'''
pygame.display._video_init_check()
if typelist == None:
return SDL_EventState(SDL_ALLEVENTS, SDL_QUERY) == SDL_ENABLE
elif hasattr(typelist, '__len__'): # XXX undocumented behaviour
for val in typelist:
if SDL_EventState(val, SDL_QUERY) == SDL_ENABLE:
return True
return False
else:
return SDL_EventState(typelist, SDL_QUERY) == SDL_ENABLE
def set_grab(grab):
'''Control the sharing of input devices with other applications.
When your program runs in a windowed environment, it will share the mouse
and keyboard devices with other applications that have focus. If your
program sets the event grab to True, it will lock all input into your
program.
It is best to not always grab the input, since it prevents the user from
doing other things on their system.
:Parameters:
- `grab`: bool
'''
pygame.display._video_init_check()
if grab:
SDL_WM_GrabInput(SDL_GRAB_ON)
else:
SDL_WM_GrabInput(SDL_GRAB_OFF)
def get_grab():
'''Test if the program is sharing input devices.
Returns true when the input events are grabbed for this application. Use
`set_grab` to control this state.
:rtype: bool
'''
pygame.display._video_init_check()
return SDL_WM_GrabInput(SDL_GRAB_QUERY) == SDL_GRAB_ON
_USEROBJECT_CHECK1 = int(0xdeadbeef) # signed
_USEROBJECT_CHECK2 = 0xfeedf00d
_user_event_objects = {}
_user_event_nextid = 1
def post(event):
'''Place a new event on the queue.
This places a new event at the end of the event queue. These Events will
later be retrieved from the other queue functions.
This is usually used for placing pygame.USEREVENT events on the queue.
    Although any type of event can be placed, if using the system event types
your program should be sure to create the standard attributes with
appropriate values.
:Parameters:
`event` : Event
Event to add to the queue.
'''
global _user_event_nextid
pygame.display._video_init_check()
sdl_event = SDL_Event(event.type)
sdl_event.user.code = _USEROBJECT_CHECK1
sdl_event.user.data1 = c_void_p(_USEROBJECT_CHECK2)
sdl_event.user.data2 = c_void_p(_user_event_nextid)
_user_event_objects[_user_event_nextid] = event
_user_event_nextid += 1
SDL_PushEvent(sdl_event)
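# Illustrative sketch (an assumption, not part of the original module):
# posting a user-defined event carrying a `code` attribute; it is retrieved
# later through the normal queue functions such as get() or wait().
def _example_post_user_event(payload):
    import pygame.locals
    post(Event(pygame.locals.USEREVENT, code=payload))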
class Event:
def __init__(self, event_type, event_dict=None, sdl_event=None,
keep_userdata=False, **attributes):
'''Create a new event object.
Creates a new event with the given type. The event is created with the
given attributes and values. The attributes can come from a dictionary
argument, or as string keys from a dictionary.
The given attributes will be readonly attributes on the new event
object itself. These are the only attributes on the Event object,
there are no methods attached to Event objects.
:Parameters:
`event_type` : int
Event type to create
`event_dict` : dict
Dictionary of attributes to assign.
`sdl_event` : `SDL_Event`
Construct a Pygame event from the given SDL_Event; used
internally.
`keep_userdata` : bool
Used internally.
`attributes` : additional keyword arguments
Additional attributes to assign to the event.
'''
if sdl_event:
uevent = cast(pointer(sdl_event), POINTER(SDL_UserEvent)).contents
if uevent.code == _USEROBJECT_CHECK1 and \
uevent.data1 == _USEROBJECT_CHECK2 and \
uevent.data2 in _user_event_objects:
# An event that was posted; grab dict from local store.
id = sdl_event.data2
for key, value in _user_event_objects[id].__dict__.items():
setattr(self, key, value)
# Free memory unless just peeking
if not keep_userdata:
del _user_event_objects[id]
else:
# Standard SDL event
self.type = sdl_event.type
if self.type == SDL_QUIT:
pass
elif self.type == SDL_ACTIVEEVENT:
self.gain = sdl_event.gain
self.state = sdl_event.state
elif self.type == SDL_KEYDOWN:
self.unicode = sdl_event.keysym.unicode
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_KEYUP:
self.key = sdl_event.keysym.sym
self.mod = sdl_event.keysym.mod
elif self.type == SDL_MOUSEMOTION:
self.pos = (sdl_event.x, sdl_event.y)
self.rel = (sdl_event.xrel, sdl_event.yrel)
self.buttons = (sdl_event.state & SDL_BUTTON(1) != 0,
sdl_event.state & SDL_BUTTON(2) != 0,
sdl_event.state & SDL_BUTTON(3) != 0)
elif self.type in (SDL_MOUSEBUTTONDOWN, SDL_MOUSEBUTTONUP):
self.pos = (sdl_event.x, sdl_event.y)
self.button = sdl_event.button
elif self.type == SDL_JOYAXISMOTION:
self.joy = sdl_event.which
self.axis = sdl_event.axis
self.value = sdl_event.value / 32767.0
elif self.type == SDL_JOYBALLMOTION:
self.joy = sdl_event.which
self.ball = sdl_event.ball
self.rel = (sdl_event.xrel, sdl_event.yrel)
elif self.type == SDL_JOYHATMOTION:
self.joy = sdl_event.which
self.hat = sdl_event.hat
hx = hy = 0
if sdl_event.value & SDL_HAT_UP:
hy = 1
if sdl_event.value & SDL_HAT_DOWN:
hy = -1
if sdl_event.value & SDL_HAT_RIGHT:
hx = 1
if sdl_event.value & SDL_HAT_LEFT:
hx = -1
self.value = (hx, hy)
elif self.type in (SDL_JOYBUTTONUP, SDL_JOYBUTTONDOWN):
self.joy = sdl_event.which
self.button = sdl_event.button
elif self.type == SDL_VIDEORESIZE:
self.size = (sdl_event.w, sdl_event.h)
self.w = sdl_event.w
self.h = sdl_event.h
elif self.type == SDL_VIDEOEXPOSE:
pass
elif self.type == SDL_SYSWMEVENT:
pass ### XXX: not implemented
elif self.type >= SDL_USEREVENT and self.type < SDL_NUMEVENTS:
self.code = sdl_event.code
else:
# Create an event (not from event queue)
self.type = event_type
if event_dict:
for key, value in event_dict.items():
setattr(self, key, value)
for key, value in attributes.items():
setattr(self, key, value)
# Bizarre undocumented but used by some people.
self.dict = self.__dict__
def __repr__(self):
d = copy(self.__dict__)
del d['type']
return '<Event(%d-%s %r)>' % \
(self.type, event_name(self.type), d)
def __nonzero__(self):
return self.type != SDL_NOEVENT
EventType = Event
| gpl-3.0 | 6,297,146,859,464,066,000 | 34.084559 | 79 | 0.627947 | false |
botswana-harvard/ba-namotswe | ba_namotswe/models/pregnancy.py | 1 | 1266 | from django.db import models
from simple_history.models import HistoricalRecords
from .crf_model_mixin import CrfModelMixin, CrfInlineModelMixin
class PregnancyHistory(CrfModelMixin):
history = HistoricalRecords()
class Meta(CrfModelMixin.Meta):
app_label = 'ba_namotswe'
verbose_name = 'Pregnancy History'
verbose_name_plural = 'Pregnancy History'
class Pregnancy(CrfInlineModelMixin, models.Model):
pregnancy_history = models.ForeignKey(PregnancyHistory)
pregnancy_date = models.DateField(
verbose_name='Date of first clinical documentation of pregnancy',
null=True,
blank=True)
delivery_date = models.DateField(
null=True,
blank=True)
history = HistoricalRecords()
def get_pending_fields(self):
pending_fields = super(Pregnancy, self).get_pending_fields()
if not self.pregnancy_date:
pending_fields.append('pregnancy_date')
if not self.delivery_date:
pending_fields.append('delivery_date')
pending_fields.sort()
return pending_fields
class Meta(CrfInlineModelMixin.Meta):
app_label = 'ba_namotswe'
verbose_name = 'Pregnancy'
crf_inline_parent = 'pregnancy_history'
| gpl-3.0 | 3,529,876,229,850,320,400 | 27.133333 | 73 | 0.678515 | false |
pratapvardhan/pandas | pandas/tests/test_base.py | 2 | 46174 | # -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.compat as compat
from pandas.core.dtypes.common import (
is_object_dtype, is_datetimetz, is_datetime64_dtype,
needs_i8_conversion)
import pandas.util.testing as tm
from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex,
PeriodIndex, Timedelta, IntervalIndex, Interval,
CategoricalIndex, Timestamp)
from pandas.compat import StringIO, PYPY, long
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.accessor import PandasDelegate
from pandas.core.base import PandasObject, NoNewAttributesMixin
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas._libs.tslib import iNaT
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container) # noqa
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
pytest.skip('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container) # noqa
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to assert_raises_regex
# (after the Exception kind).
tm.assert_raises_regex(
TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate(object):
class Delegator(object):
_properties = ['foo']
_methods = ['bar']
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ='property'
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._methods,
typ='method'
)
delegate = self.Delegate(self.Delegator())
def f():
delegate.foo
pytest.raises(TypeError, f)
def f():
delegate.foo = 5
pytest.raises(TypeError, f)
def f():
delegate.foo()
pytest.raises(TypeError, f)
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops(object):
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and
(obj.is_boolean() or not obj._can_hold_na)):
# don't test boolean / int64 index
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name='a')
self.int_index = tm.makeIntIndex(10, name='a')
self.float_index = tm.makeFloatIndex(10, name='a')
self.dt_index = tm.makeDateIndex(10, name='a')
self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(
tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10, name='a')
self.string_index = tm.makeStringIndex(10, name='a')
self.unicode_index = tm.makeUnicodeIndex(10, name='a')
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index, name='a')
self.float_series = Series(arr, index=self.float_index, name='a')
self.dt_series = Series(arr, index=self.dt_index, name='a')
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name='a')
self.string_series = Series(arr, index=self.string_index, name='a')
types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string',
'unicode']
fmts = ["{0}_{1}".format(t, f)
for t in types for f in ['index', 'series']]
self.objs = [getattr(self, f)
for f in fmts if getattr(self, f, None) is not None]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(
getattr(o.index, op), index=o.index, name='a')
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
                # these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(expected,
np.ndarray):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
pytest.raises(TypeError, lambda: getattr(o, op))
else:
pytest.raises(AttributeError,
lambda: getattr(o, op))
def test_binary_ops_docs(self):
from pandas import DataFrame, Panel
op_map = {'add': '+',
'sub': '-',
'mul': '*',
'mod': '%',
'pow': '**',
'truediv': '/',
'floordiv': '//'}
for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv',
'floordiv']:
for klass in [Series, DataFrame, Panel]:
operand1 = klass.__name__.lower()
operand2 = 'other'
op = op_map[op_name]
expected_str = ' '.join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = ' '.join([operand2, op, operand1])
assert expected_str in getattr(klass, 'r' + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super(TestIndexOps, self).setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
# this fails for numpy < 1.9
# and oddly for *some* platforms
# result = None != o # noqa
# assert result.iat[0]
# assert result.iat[1]
if (is_datetime64_dtype(o) or is_datetimetz(o)):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ['shape', 'dtype', 'T', 'nbytes']:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ['flags', 'strides', 'itemsize']:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, 'base')
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_ops(self):
for op in ['max', 'min']:
for o in self.objs:
result = getattr(o, op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(
ordinal=getattr(o._ndarray_values, op)(),
freq=o.freq)
try:
assert result == expected
except TypeError:
# comparing tz-aware series with np.array results in
# TypeError
expected = expected.astype('M8[ns]').astype('int64')
assert result.value == expected
def test_nanops(self):
# GH 7261
for op in ['max', 'min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
assert getattr(obj, op)() == 2.0
obj = klass([np.nan])
assert pd.isna(getattr(obj, op)())
obj = klass([])
assert pd.isna(getattr(obj, op)())
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
assert getattr(obj, op)() == datetime(2011, 11, 1)
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
assert getattr(obj, op)(), datetime(2011, 11, 1)
# argmin/max
obj = Index(np.arange(5, dtype='int64'))
assert obj.argmin() == 0
assert obj.argmax() == 4
obj = Index([np.nan, 1, np.nan, 2])
assert obj.argmin() == 1
assert obj.argmax() == 3
obj = Index([np.nan])
assert obj.argmin() == -1
assert obj.argmax() == -1
obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2),
pd.NaT])
assert obj.argmin() == 1
assert obj.argmax() == 2
obj = Index([pd.NaT])
assert obj.argmin() == -1
assert obj.argmax() == -1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
rep = np.repeat(values, range(1, len(o) + 1))
o = klass(rep, index=idx, name='a')
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(range(10, 0, -1), index=expected_index,
dtype='int64', name='a')
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == 'a'
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
elif is_datetimetz(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(result,
orig._values.astype(object).values)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert o.nunique() == len(np.unique(o.values))
def test_value_counts_unique_nunique_null(self):
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetimetz(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = iNaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = 'a'
else:
if is_datetimetz(o):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name='a')
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype='int64', name='a')
expected_s = Series(list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype='int64', name='a')
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == 'a'
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == 'a'
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result,
Index(values[1:], name='a'))
elif is_datetimetz(o):
# unable to compare NaT / nan
vals = values[2:].astype(object).values
tm.assert_numpy_array_equal(result[1:], vals)
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
            # don't sort, have to sort after the fact as not sorting is
            # platform-dependent
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
def test_value_counts_bins(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
# bins
pytest.raises(TypeError, lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0],
index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ['a', 'b', 'b', 'b', np.nan, np.nan,
'd', 'd', 'a', 'a', 'b']
s = klass(s_values)
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(['a', 'b', np.nan, 'd'])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(['a', 'b', np.nan, 'd'], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected,
check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]),
check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize('klass', [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM',
'xxyyzz20100101EGG', 'xxyyww20090101EGG',
'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3],
names=["person_id", "dt", "food"],
parse_dates=["dt"])
s = klass(df['dt'].copy())
s.name = None
idx = pd.to_datetime(['2010-01-01 00:00:00Z',
'2008-09-09 00:00:00Z',
'2009-01-01 00:00:00Z'])
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z',
'2009-01-01 00:00:00Z',
'2008-09-09 00:00:00Z'],
dtype='datetime64[ns]')
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df['dt'].copy()
s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == 'datetime64[ns]'
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == 'datetime64[ns]'
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name='dt')
result = td.value_counts()
expected_s = Series([6], index=[Timedelta('1day')], name='dt')
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(['1 days'], name='dt')
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name='dt')
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
                exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
labels, uniques = o.factorize()
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig),
check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques,
check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
dtype=np.intp)
labels, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig).sort_values(),
check_names=False)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4],
np.intp)
labels, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name='a')
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True],
dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep='last')
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with tm.assert_raises_regex(
TypeError, r"drop_duplicates\(\) got an unexpected "
"keyword argument"):
idx.drop_duplicates(inplace=True)
else:
expected = Series([False] * len(original),
index=original.index, name='a')
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name='a')
expected = Series([False] * len(original) + [True, True],
index=idx, name='a')
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep='last'), expected)
tm.assert_series_equal(s.drop_duplicates(keep='last'),
s[~np.array(base)])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(s.drop_duplicates(keep=False),
s[~np.array(base)])
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame({'a': [1, 1, 1, 'one', 'one'],
'b': [2, 2, np.nan, np.nan, np.nan],
'c': [3, 3, np.nan, np.nan, 'three'],
'd': [1, 2, 3, 4, 4],
'e': [datetime(2015, 1, 1), datetime(2015, 1, 1),
datetime(2015, 2, 1), pd.NaT, pd.NaT]
})
for column in df.columns:
for keep in ['first', 'last', False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
        # GH 11343
        # though Index.fillna and Series.fillna have separate implementations,
        # test here to confirm that they behave the same way
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if (is_object_dtype(o) or (isinstance(o, Series) and
is_object_dtype(o.index))):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert ((o.memory_usage(index=False) +
o.index.memory_usage()) ==
o.memory_usage(index=True))
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(obj.transpose(), obj)
else:
tm.assert_series_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
tm.assert_raises_regex(ValueError, self.errmsg,
obj.transpose, 1)
tm.assert_raises_regex(ValueError, self.errmsg,
obj.transpose, axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(np.transpose(obj), obj)
else:
tm.assert_series_equal(np.transpose(obj), obj)
tm.assert_raises_regex(ValueError, self.errmsg,
np.transpose, obj, axes=1)
class TestNoNewAttributesMixin(object):
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
def f():
t.b = "test"
pytest.raises(AttributeError, f)
assert not hasattr(t, "b")
class TestToIterable(object):
# test that we convert an iterable to python types
dtypes = [
('int8', (int, long)),
('int16', (int, long)),
('int32', (int, long)),
('int64', (int, long)),
('uint8', (int, long)),
('uint16', (int, long)),
('uint32', (int, long)),
('uint64', (int, long)),
('float16', float),
('float32', float),
('float64', float),
('datetime64[ns]', Timestamp),
('datetime64[ns, US/Eastern]', Timestamp),
('timedelta64[ns]', Timedelta)]
@pytest.mark.parametrize(
'dtype, rdtype', dtypes)
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype, obj',
[
('object', object, 'a'),
('object', (int, long), 1),
('category', object, 'a'),
('category', (int, long), 1)])
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable_object_and_category(self, typ, method,
dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype', dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test items / iteritems yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.iteritems())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype',
dtypes + [
('object', (int, long)),
('category', (int, long))])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
    def test_categorical_datetimelike(self, method):
i = CategoricalIndex([Timestamp('1999-12-31'),
Timestamp('2000-12-31')])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp('2011-01-01'), Timestamp('2011-01-02')]
s = Series(vals)
assert s.dtype == 'datetime64[ns]'
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [Timestamp('2011-01-01', tz='US/Eastern'),
Timestamp('2011-01-02', tz='US/Eastern')]
s = Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta('1 days'), Timedelta('2 days')]
s = Series(vals)
assert s.dtype == 'timedelta64[ns]'
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = Series(vals)
assert s.dtype == 'object'
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == 'M'
assert res == exp
@pytest.mark.parametrize('array, expected_type, dtype', [
(np.array([0, 1], dtype=np.int64), np.ndarray, 'int64'),
(np.array(['a', 'b']), np.ndarray, 'object'),
(pd.Categorical(['a', 'b']), pd.Categorical, 'category'),
(pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]'),
(pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), pd.DatetimeIndex,
'datetime64[ns, US/Central]'),
(pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'),
(pd.PeriodIndex([2018, 2019], freq='A'), np.ndarray, 'object'),
(pd.IntervalIndex.from_breaks([0, 1, 2]), np.ndarray, 'object'),
])
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
if isinstance(l_values, np.ndarray):
tm.assert_numpy_array_equal(l_values, r_values)
elif isinstance(l_values, pd.Index):
tm.assert_index_equal(l_values, r_values)
elif pd.api.types.is_categorical(l_values):
tm.assert_categorical_equal(l_values, r_values)
else:
raise TypeError("Unexpected type {}".format(type(l_values)))
assert l_values.dtype == dtype
assert r_values.dtype == dtype
@pytest.mark.parametrize('array, expected', [
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(['0', '1']), np.array(['0', '1'], dtype=object)),
(pd.Categorical(['a', 'a']), np.array([0, 0], dtype='int8')),
(pd.DatetimeIndex(['2017-01-01T00:00:00']),
np.array(['2017-01-01T00:00:00'], dtype='M8[ns]')),
(pd.DatetimeIndex(['2017-01-01T00:00:00'], tz="US/Eastern"),
np.array(['2017-01-01T05:00:00'], dtype='M8[ns]')),
(pd.TimedeltaIndex([10**10]), np.array([10**10], dtype='m8[ns]')),
pytest.param(
pd.PeriodIndex(['2017', '2018'], freq='D'),
np.array([17167, 17532]),
marks=pytest.mark.xfail(reason="PeriodArray Not implemented")
),
])
def test_ndarray_values(array, expected):
l_values = pd.Series(array)._ndarray_values
r_values = pd.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
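# Note (a summary of what the parametrizations above encode, not new behaviour):
# ``._values`` preserves pandas extension types (e.g. Categorical, tz-aware
# DatetimeIndex), while ``._ndarray_values`` always drops to a plain numpy
# representation (int8 codes for Categorical, naive M8[ns] for tz-aware data).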
| bsd-3-clause | -5,006,351,442,534,482,000 | 36.147224 | 79 | 0.505869 | false |
invisiblek/python-for-android | python-modules/twisted/twisted/names/secondary.py | 57 | 3201 | # Copyright (c) 2001-2006 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet import task, defer
from twisted.names import dns
from twisted.names import common
from twisted.names import client
from twisted.names import resolve
from twisted.python import log, failure
from twisted.application import service
class SecondaryAuthorityService(service.Service):
calls = None
def __init__(self, primary, domains):
"""
@param primary: The IP address of the server from which to perform
zone transfers.
@param domains: A sequence of domain names for which to perform
zone transfers.
"""
self.primary = primary
self.domains = [SecondaryAuthority(primary, d) for d in domains]
def getAuthority(self):
return resolve.ResolverChain(self.domains)
def startService(self):
service.Service.startService(self)
self.calls = [task.LoopingCall(d.transfer) for d in self.domains]
i = 0
from twisted.internet import reactor
for c in self.calls:
# XXX Add errbacks, respect proper timeouts
reactor.callLater(i, c.start, 60 * 60)
i += 1
def stopService(self):
service.Service.stopService(self)
for c in self.calls:
c.stop()
from twisted.names.authority import FileAuthority
class SecondaryAuthority(common.ResolverBase):
"""An Authority that keeps itself updated by performing zone transfers"""
transferring = False
soa = records = None
def __init__(self, primaryIP, domain):
common.ResolverBase.__init__(self)
self.primary = primaryIP
self.domain = domain
def transfer(self):
if self.transferring:
return
        self.transferring = True
return client.Resolver(servers=[(self.primary, dns.PORT)]
).lookupZone(self.domain
).addCallback(self._cbZone
).addErrback(self._ebZone
)
def _lookup(self, name, cls, type, timeout=None):
if not self.soa or not self.records:
return defer.fail(failure.Failure(dns.DomainError(name)))
return FileAuthority.__dict__['_lookup'](self, name, cls, type, timeout)
#shouldn't we just subclass? :P
lookupZone = FileAuthority.__dict__['lookupZone']
def _cbZone(self, zone):
        self.transferring = False
        ans, _, _ = zone
self.records = r = {}
for rec in ans:
if not self.soa and rec.type == dns.SOA:
self.soa = (str(rec.name).lower(), rec.payload)
else:
r.setdefault(str(rec.name).lower(), []).append(rec.payload)
def _ebZone(self, failure):
log.msg("Updating %s from %s failed during zone transfer" % (self.domain, self.primary))
log.err(failure)
def update(self):
self.transfer().addCallbacks(self._cbTransferred, self._ebTransferred)
def _cbTransferred(self, result):
self.transferring = False
def _ebTransferred(self, failure):
self.transferred = False
log.msg("Transferring %s from %s failed after zone transfer" % (self.domain, self.primary))
log.err(failure)
| apache-2.0 | -7,817,506,953,968,229,000 | 30.382353 | 99 | 0.634177 | false |
dkluffy/dkluff-code | code/work/testpassport.py | 1 | 2023 | #!/usr/bin/env python
# coding=utf-8
import re
import json
import sys
fmt = '\033[0;3{}m{}\033[0m'.format
def cls():
print chr(27) + "[2J"
def fmtAnswer(s,sp=None):
r=re.sub("Answer:| |\r|\n","",s)
if sp is None:
a=[]
for i in r.upper():
a+=[i]
return a
return r.upper().split(sp)
def fmtItem(item):
q=""
l=len(item)
i=0
while i < l:
q+=item[i]
i+=1
if re.search("^[A-Z]\.",item[i]):
break
return [q,item[i:]]
def readf(f):
exam=[]
for l in f:
r=re.search("^[0-9]*\.",l)
item=[]
if r:
while not l.startswith("Answer:"):
item+=[l]
l=f.next()
exam+=[fmtItem(item)+[fmtAnswer(l,sp=",")]]
#exam[i] = [ q,[opts],[ans] ]
return exam
def examTodict(exam):
j={}
iid=1
for i in exam:
j[str(iid)]={"question":i[0],"opts":i[1],"ans":i[2]}
iid+=1
return j
def prtq(item):
print "----->"
print fmt(4,item[0])
for i in item[1]:
print i
def prtRes(answer,myanswer,item):
print "Answer is :",answer
c=1
if answer == myanswer:
c=2
for i in item:
if i[0] in answer:
print fmt(c,i)
else:
print i
return answer == myanswer
def recordmis(fdir,iid):
with open(fdir,'a') as r:
print >> r,iid
def play(p):
iid=0
    length=len(p)
    while iid < length:
cls()
i=p[iid]
prtq(i)
with open(bookmark,'a') as bk:
print >>bk,iid+1
myanswer=fmtAnswer(raw_input("Enter Answers: "))
cls()
print fmt(4,i[0])
tf=prtRes(i[2],myanswer,i[1])
if not tf:
recordmis(logfile,iid+1)
iid+=1
c=raw_input("<----Continue...?")
try:
iid=int(c)-1
except Exception:
pass
if __name__ == "__main__":
f=open(sys.argv[1])
logfile="wrlog.log"
bookmark="bookmark.txt"
p=readf(f)
wrgs=[]
if len(sys.argv) == 3:
wrgfile=sys.argv[2]
print "Review...."
with open(wrgfile) as r:
for l in r:
wrgs+=[p[int(l[:-1])-1]]
if len(wrgs) > 0:
play(wrgs)
else:
play(p)
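# Usage (inferred from the argument handling above; file names are examples):
#   python testpassport.py exam.txt            -> run through every question
#   python testpassport.py exam.txt wrlog.log  -> review only the questions
#                                                 previously answered wrong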
| apache-2.0 | -2,902,105,228,599,343,600 | 15.184 | 56 | 0.524963 | false |
kmacinnis/sympy | examples/intermediate/infinite_1d_box.py | 116 | 2911 | #!/usr/bin/env python
"""
Applying perturbation theory to calculate the ground state energy
of the infinite 1D box of width ``a`` with a perturbation
which is linear in ``x``, up to second order in perturbation
"""
from sympy.core import pi
from sympy import Integral, var, S
from sympy.functions import sin, sqrt
def X_n(n, a, x):
"""
Returns the wavefunction X_{n} for an infinite 1D box
``n``
the "principal" quantum number. Corresponds to the number of nodes in
the wavefunction. n >= 0
``a``
width of the well. a > 0
``x``
x coordinate.
"""
n, a, x = map(S, [n, a, x])
C = sqrt(2 / a)
return C * sin(pi * n * x / a)
def E_n(n, a, mass):
"""
    Returns the energy E_{n} for a 1D potential well with infinitely high walls
``n``
the "principal" quantum number. Corresponds to the number of nodes in
the wavefunction. n >= 0
``a``
width of the well. a > 0
``mass``
mass.
"""
return ((n * pi / a)**2) / mass
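# Units note (an observation about the line above, not a change): E_n is
# computed as (n*pi/a)**2 / mass, i.e. hbar is taken as 1 and ``mass`` stands
# in for 2*m in the textbook formula E_n = n**2 * pi**2 * hbar**2 / (2*m*a**2).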
def energy_corrections(perturbation, n, a=10, mass=0.5):
"""
    Calculates the first- and second-order corrections from perturbation theory
    and returns a tuple whose first element is the unperturbed energy and whose
    remaining two elements are the first- and second-order corrections.
``n``
the "nodal" quantum number. Corresponds to the number of nodes in the
wavefunction. n >= 0
``a``
width of the well. a > 0
``mass``
mass.
"""
x, _a = var("x _a")
Vnm = lambda n, m, a: Integral(X_n(n, a, x) * X_n(m, a, x)
* perturbation.subs({_a: a}), (x, 0, a)).n()
    # From theory, for a perturbation of the form V0*x/a only the V(n, n-1) and
    # V(n, n+1) matrix elements give non-negligible contributions, so only those
    # two terms are kept in the second-order sum below.
return (E_n(n, a, mass).evalf(),
Vnm(n, n, a).evalf(),
(Vnm(n, n - 1, a)**2/(E_n(n, a, mass) - E_n(n - 1, a, mass))
+ Vnm(n, n + 1, a)**2/(E_n(n, a, mass) - E_n(n + 1, a, mass))).evalf())
def main():
print()
print("Applying perturbation theory to calculate the ground state energy")
print("of the infinite 1D box of width ``a`` with a perturbation")
print("which is linear in ``x``, up to second order in perturbation.")
print()
x, _a = var("x _a")
perturbation = .1 * x / _a
E1 = energy_corrections(perturbation, 1)
print("Energy for first term (n=1):")
print("E_1^{(0)} = ", E1[0])
print("E_1^{(1)} = ", E1[1])
print("E_1^{(2)} = ", E1[2])
print()
E2 = energy_corrections(perturbation, 2)
print("Energy for second term (n=2):")
print("E_2^{(0)} = ", E2[0])
print("E_2^{(1)} = ", E2[1])
print("E_2^{(2)} = ", E2[2])
print()
E3 = energy_corrections(perturbation, 3)
print("Energy for third term (n=3):")
print("E_3^{(0)} = ", E3[0])
print("E_3^{(1)} = ", E3[1])
print("E_3^{(2)} = ", E3[2])
print()
if __name__ == "__main__":
main()
| bsd-3-clause | 4,209,410,431,733,299,700 | 25.463636 | 82 | 0.544486 | false |
atheed/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/genscript.py | 191 | 4129 | """ (deprecated) generate a single-file self-contained version of pytest """
import os
import sys
import pkgutil
import py
import _pytest
def find_toplevel(name):
for syspath in sys.path:
base = py.path.local(syspath)
lib = base/name
if lib.check(dir=1):
return lib
mod = base.join("%s.py" % name)
if mod.check(file=1):
return mod
raise LookupError(name)
def pkgname(toplevel, rootpath, path):
parts = path.parts()[len(rootpath.parts()):]
return '.'.join([toplevel] + [x.purebasename for x in parts])
def pkg_to_mapping(name):
toplevel = find_toplevel(name)
name2src = {}
if toplevel.check(file=1): # module
name2src[toplevel.purebasename] = toplevel.read()
else: # package
for pyfile in toplevel.visit('*.py'):
pkg = pkgname(name, toplevel, pyfile)
name2src[pkg] = pyfile.read()
# with wheels py source code might be not be installed
# and the resulting genscript is useless, just bail out.
assert name2src, "no source code found for %r at %r" %(name, toplevel)
return name2src
def compress_mapping(mapping):
import base64, pickle, zlib
data = pickle.dumps(mapping, 2)
data = zlib.compress(data, 9)
data = base64.encodestring(data)
data = data.decode('ascii')
return data
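# The generated standalone script is expected to reverse the encoding above
# (roughly: base64-decode, zlib-decompress, then pickle.loads) to recover the
# module-name -> source mapping; the exact loader lives in standalonetemplate.py.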
def compress_packages(names):
mapping = {}
for name in names:
mapping.update(pkg_to_mapping(name))
return compress_mapping(mapping)
def generate_script(entry, packages):
data = compress_packages(packages)
tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py')
exe = tmpl.read()
exe = exe.replace('@SOURCES@', data)
exe = exe.replace('@ENTRY@', entry)
return exe
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption("--genscript", action="store", default=None,
dest="genscript", metavar="path",
help="create standalone pytest script at given target path.")
def pytest_cmdline_main(config):
import _pytest.config
genscript = config.getvalue("genscript")
if genscript:
tw = _pytest.config.create_terminal_writer(config)
tw.line("WARNING: usage of genscript is deprecated.",
red=True)
deps = ['py', '_pytest', 'pytest'] # pluggy is vendored
if sys.version_info < (2,7):
deps.append("argparse")
tw.line("generated script will run on python2.6-python3.3++")
else:
tw.line("WARNING: generated script will not run on python2.6 "
"due to 'argparse' dependency. Use python2.6 "
"to generate a python2.6 compatible script", red=True)
script = generate_script(
'import pytest; raise SystemExit(pytest.cmdline.main())',
deps,
)
genscript = py.path.local(genscript)
genscript.write(script)
tw.line("generated pytest standalone script: %s" % genscript,
bold=True)
return 0
def pytest_namespace():
return {'freeze_includes': freeze_includes}
def freeze_includes():
"""
Returns a list of module names used by py.test that should be
included by cx_freeze.
"""
result = list(_iter_all_modules(py))
result += list(_iter_all_modules(_pytest))
return result
def _iter_all_modules(package, prefix=''):
"""
Iterates over the names of all modules that can be found in the given
package, recursively.
Example:
_iter_all_modules(_pytest) ->
['_pytest.assertion.newinterpret',
'_pytest.capture',
'_pytest.core',
...
]
"""
if type(package) is not str:
path, prefix = package.__path__[0], package.__name__ + '.'
else:
path = package
for _, name, is_package in pkgutil.iter_modules([path]):
if is_package:
for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
yield prefix + m
else:
yield prefix + name
| mpl-2.0 | -3,636,961,366,474,976,000 | 30.280303 | 84 | 0.604505 | false |
gitaarik/django | tests/m2m_through/models.py | 58 | 4699 | from datetime import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# M2M described on one of the models
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=128)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Group(models.Model):
name = models.CharField(max_length=128)
members = models.ManyToManyField(Person, through='Membership')
custom_members = models.ManyToManyField(Person, through='CustomMembership', related_name="custom")
nodefaultsnonulls = models.ManyToManyField(
Person,
through='TestNoDefaultsOrNulls',
related_name="testnodefaultsnonulls",
)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Membership(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
date_joined = models.DateTimeField(default=datetime.now)
invite_reason = models.CharField(max_length=64, null=True)
class Meta:
ordering = ('date_joined', 'invite_reason', 'group')
def __str__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
@python_2_unicode_compatible
class CustomMembership(models.Model):
person = models.ForeignKey(
Person,
models.CASCADE,
db_column="custom_person_column",
related_name="custom_person_related_name",
)
group = models.ForeignKey(Group, models.CASCADE)
weird_fk = models.ForeignKey(Membership, models.SET_NULL, null=True)
date_joined = models.DateTimeField(default=datetime.now)
def __str__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
class Meta:
db_table = "test_table"
ordering = ["date_joined"]
class TestNoDefaultsOrNulls(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
nodefaultnonull = models.CharField(max_length=5)
@python_2_unicode_compatible
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Friendship", symmetrical=False)
def __str__(self):
return self.name
class Friendship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, models.CASCADE, related_name="rel_to_set")
date_friended = models.DateTimeField()
# Custom through link fields
@python_2_unicode_compatible
class Event(models.Model):
title = models.CharField(max_length=50)
invitees = models.ManyToManyField(
Person, through='Invitation',
through_fields=('event', 'invitee'),
related_name='events_invited',
)
def __str__(self):
return self.title
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE, related_name='invitations')
# field order is deliberately inverted. the target field is "invitee".
inviter = models.ForeignKey(Person, models.CASCADE, related_name='invitations_sent')
invitee = models.ForeignKey(Person, models.CASCADE, related_name='invitations')
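# through_fields=('event', 'invitee') on Event.invitees is what disambiguates
# the link: Invitation carries two ForeignKeys to Person (inviter and invitee),
# so Django must be told which one is the target side of the m2m relation.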
@python_2_unicode_compatible
class Employee(models.Model):
name = models.CharField(max_length=5)
subordinates = models.ManyToManyField(
'self',
through="Relationship",
through_fields=('source', 'target'),
symmetrical=False,
)
class Meta:
ordering = ('pk',)
def __str__(self):
return self.name
class Relationship(models.Model):
# field order is deliberately inverted.
another = models.ForeignKey(Employee, models.SET_NULL, related_name="rel_another_set", null=True)
target = models.ForeignKey(Employee, models.CASCADE, related_name="rel_target_set")
source = models.ForeignKey(Employee, models.CASCADE, related_name="rel_source_set")
class Ingredient(models.Model):
iname = models.CharField(max_length=20, unique=True)
class Meta:
ordering = ('iname',)
class Recipe(models.Model):
rname = models.CharField(max_length=20, unique=True)
ingredients = models.ManyToManyField(
Ingredient, through='RecipeIngredient', related_name='recipes',
)
class Meta:
ordering = ('rname',)
class RecipeIngredient(models.Model):
ingredient = models.ForeignKey(Ingredient, models.CASCADE, to_field='iname')
recipe = models.ForeignKey(Recipe, models.CASCADE, to_field='rname')
| bsd-3-clause | -7,930,515,699,810,865,000 | 29.121795 | 102 | 0.69036 | false |
NicolasHerin/game_book_share | node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py | 1569 | 23354 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest, convert_to_binary)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest, convert_to_binary):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
    except Exception:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
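      # For example (illustrative value): PRODUCT_NAME='My App' would give
      #   ${PRODUCT_NAME:identifier}        -> 'My_App'
      #   ${PRODUCT_NAME:rfc1034identifier} -> 'My-App'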
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the inputs .xcassets files. The
|keys| arguments is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contains an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs does not contains imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if inputs path are relative, so use os.path.abspath
# to get absolute path name for inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
2. pick the provisioning profile that best match the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from user or SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or if the user specified
the PROVISIONING_PROFILE variable, only consult it) and select the most
specific that correspond to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
      Copy of data where each reference to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
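  # Illustrative example (assumed values): with substitutions
  #   {'AppIdentifierPrefix': 'ABCDE12345.'}, the string
  #   '$(AppIdentifierPrefix)com.example.app' expands to
  #   'ABCDE12345.com.example.app'; lists and dicts are expanded recursively.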
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit | 4,744,965,596,486,516,000 | 37.285246 | 80 | 0.665967 | false |
jelugbo/ddi | lms/djangoapps/instructor/tests/test_ecommerce.py | 13 | 13342 | """
Unit tests for Ecommerce feature flag in new instructor dashboard.
"""
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from course_modes.models import CourseMode
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegistrationCode
from mock import patch
from student.roles import CourseFinanceAdminRole
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class TestECommerceDashboardViews(ModuleStoreTestCase):
"""
Check for E-commerce view on the new instructor dashboard
"""
def setUp(self):
self.course = CourseFactory.create()
# Create instructor account
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
mode = CourseMode(
course_id=self.course.id.to_deprecated_string(), mode_slug='honor',
mode_display_name='honor', min_price=10, currency='usd'
)
mode.save()
# URL for instructor dash
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.e_commerce_link = '<a href="" data-section="e-commerce">E-Commerce</a>'
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
def test_pass_e_commerce_tab_in_instructor_dashboard(self):
"""
Test Pass E-commerce Tab is in the Instructor Dashboard
"""
response = self.client.get(self.url)
self.assertTrue(self.e_commerce_link in response.content)
def test_user_has_finance_admin_rights_in_e_commerce_tab(self):
response = self.client.get(self.url)
self.assertTrue(self.e_commerce_link in response.content)
# Total amount html should render in e-commerce page, total amount will be 0
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
self.assertTrue('<span>Total Amount: <span>$' + str(total_amount) + '</span></span>' in response.content)
self.assertTrue('Download All e-Commerce Purchase' in response.content)
# removing the course finance_admin role of login user
CourseFinanceAdminRole(self.course.id).remove_users(self.instructor)
# total amount should not be visible in e-commerce page if the user is not finance admin
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id)
self.assertFalse('Download All e-Commerce Purchase' in response.content)
self.assertFalse('<span>Total Amount: <span>$' + str(total_amount) + '</span></span>' in response.content)
def test_user_view_course_price(self):
"""
test to check if the user views the set price button and price in
the instructor dashboard
"""
response = self.client.get(self.url)
self.assertTrue(self.e_commerce_link in response.content)
# Total amount html should render in e-commerce page, total amount will be 0
course_honor_mode = CourseMode.mode_for_course(self.course.id, 'honor')
price = course_honor_mode.min_price
self.assertTrue('Course Price: <span>$' + str(price) + '</span>' in response.content)
self.assertFalse('+ Set Price</a></span>' in response.content)
# removing the course finance_admin role of login user
CourseFinanceAdminRole(self.course.id).remove_users(self.instructor)
# total amount should not be visible in e-commerce page if the user is not finance admin
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertFalse('+ Set Price</a></span>' in response.content)
def test_update_course_price_check(self):
price = 200
# course B
course2 = CourseFactory.create(org='EDX', display_name='test_course', number='100')
mode = CourseMode(
course_id=course2.id.to_deprecated_string(), mode_slug='honor',
mode_display_name='honor', min_price=30, currency='usd'
)
mode.save()
# course A update
CourseMode.objects.filter(course_id=self.course.id).update(min_price=price)
set_course_price_url = reverse('set_course_mode_price', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'course_price': price, 'currency': 'usd'}
response = self.client.post(set_course_price_url, data)
self.assertTrue('CourseMode price updated successfully' in response.content)
# Course A updated total amount should be visible in e-commerce page if the user is finance admin
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
self.assertTrue('Course Price: <span>$' + str(price) + '</span>' in response.content)
def test_user_admin_set_course_price(self):
"""
test to set the course price related functionality.
        test all the scenarios for setting a new course price
"""
set_course_price_url = reverse('set_course_mode_price', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'course_price': '12%', 'currency': 'usd'}
# Value Error course price should be a numeric value
response = self.client.post(set_course_price_url, data)
self.assertTrue("Please Enter the numeric value for the course price" in response.content)
# validation check passes and course price is successfully added
data['course_price'] = 100
response = self.client.post(set_course_price_url, data)
self.assertTrue("CourseMode price updated successfully" in response.content)
course_honor_mode = CourseMode.objects.get(mode_slug='honor')
course_honor_mode.delete()
# Course Mode not exist with mode slug honor
response = self.client.post(set_course_price_url, data)
self.assertTrue("CourseMode with the mode slug({mode_slug}) DoesNotExist".format(mode_slug='honor') in response.content)
def test_add_coupon(self):
"""
Test Add Coupon Scenarios. Handle all the HttpResponses return by add_coupon view
"""
# URL for add_coupon
add_coupon_url = reverse('add_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'code': 'A2314', 'course_id': self.course.id.to_deprecated_string(),
'description': 'ADSADASDSAD', 'created_by': self.instructor, 'discount': 5
}
response = self.client.post(add_coupon_url, data)
self.assertTrue("coupon with the coupon code ({code}) added successfully".format(code=data['code']) in response.content)
data = {
'code': 'A2314', 'course_id': self.course.id.to_deprecated_string(),
'description': 'asdsasda', 'created_by': self.instructor, 'discount': 99
}
response = self.client.post(add_coupon_url, data)
self.assertTrue("coupon with the coupon code ({code}) already exist".format(code='A2314') in response.content)
response = self.client.post(self.url)
self.assertTrue('<td>ADSADASDSAD</td>' in response.content)
self.assertTrue('<td>A2314</td>' in response.content)
self.assertFalse('<td>111</td>' in response.content)
data = {
'code': 'A2345314', 'course_id': self.course.id.to_deprecated_string(),
'description': 'asdsasda', 'created_by': self.instructor, 'discount': 199
}
response = self.client.post(add_coupon_url, data)
self.assertTrue("Please Enter the Coupon Discount Value Less than or Equal to 100" in response.content)
data['discount'] = '25%'
response = self.client.post(add_coupon_url, data=data)
self.assertTrue('Please Enter the Integer Value for Coupon Discount' in response.content)
course_registration = CourseRegistrationCode(
code='Vs23Ws4j', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor
)
course_registration.save()
data['code'] = 'Vs23Ws4j'
response = self.client.post(add_coupon_url, data)
self.assertTrue("The code ({code}) that you have tried to define is already in use as a registration code"
.format(code=data['code']) in response.content)
def test_delete_coupon(self):
"""
Test Delete Coupon Scenarios. Handle all the HttpResponses return by remove_coupon view
"""
coupon = Coupon(
code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
percentage_discount=10, created_by=self.instructor
)
coupon.save()
response = self.client.post(self.url)
self.assertTrue('<td>AS452</td>' in response.content)
# URL for remove_coupon
delete_coupon_url = reverse('remove_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(delete_coupon_url, {'id': coupon.id})
self.assertTrue('coupon with the coupon id ({coupon_id}) updated successfully'.format(coupon_id=coupon.id) in response.content)
coupon.is_active = False
coupon.save()
response = self.client.post(delete_coupon_url, {'id': coupon.id})
self.assertTrue('coupon with the coupon id ({coupon_id}) is already inactive'.format(coupon_id=coupon.id) in response.content)
response = self.client.post(delete_coupon_url, {'id': 24454})
self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=24454) in response.content)
response = self.client.post(delete_coupon_url, {'id': ''})
self.assertTrue('coupon id is None' in response.content)
def test_get_coupon_info(self):
"""
Test Edit Coupon Info Scenarios. Handle all the HttpResponses return by edit_coupon_info view
"""
coupon = Coupon(
code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
percentage_discount=10, created_by=self.instructor
)
coupon.save()
# URL for edit_coupon_info
edit_url = reverse('get_coupon_info', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(edit_url, {'id': coupon.id})
self.assertTrue('coupon with the coupon id ({coupon_id}) updated successfully'.format(coupon_id=coupon.id) in response.content)
response = self.client.post(edit_url, {'id': 444444})
self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=444444) in response.content)
response = self.client.post(edit_url, {'id': ''})
self.assertTrue('coupon id not found"' in response.content)
coupon.is_active = False
coupon.save()
response = self.client.post(edit_url, {'id': coupon.id})
self.assertTrue("coupon with the coupon id ({coupon_id}) is already inactive".format(coupon_id=coupon.id) in response.content)
def test_update_coupon(self):
"""
Test Update Coupon Info Scenarios. Handle all the HttpResponses return by update_coupon view
"""
coupon = Coupon(
code='AS452', description='asdsadsa', course_id=self.course.id.to_deprecated_string(),
percentage_discount=10, created_by=self.instructor
)
coupon.save()
response = self.client.post(self.url)
self.assertTrue('<td>AS452</td>' in response.content)
data = {
'coupon_id': coupon.id, 'code': 'AS452', 'discount': '10', 'description': 'updated_description', # pylint: disable=E1101
'course_id': coupon.course_id.to_deprecated_string()
}
# URL for update_coupon
update_coupon_url = reverse('update_coupon', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon with the coupon id ({coupon_id}) updated Successfully'.format(coupon_id=coupon.id)in response.content)
response = self.client.post(self.url)
self.assertTrue('<td>updated_description</td>' in response.content)
data['coupon_id'] = 1000 # Coupon Not Exist with this ID
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon with the coupon id ({coupon_id}) DoesNotExist'.format(coupon_id=1000) in response.content)
data['coupon_id'] = '' # Coupon id is not provided
response = self.client.post(update_coupon_url, data=data)
self.assertTrue('coupon id not found' in response.content)
| agpl-3.0 | 3,010,905,836,662,867,000 | 47.871795 | 135 | 0.661145 | false |
vipulroxx/sympy | sympy/core/core.py | 30 | 3814 | """ The core's core. """
from __future__ import print_function, division
# used for canonical ordering of symbolic sequences
# via __cmp__ method:
# FIXME this is *so* irrelevant and outdated!
ordering_of_classes = [
# singleton numbers
'Zero', 'One', 'Half', 'Infinity', 'NaN', 'NegativeOne', 'NegativeInfinity',
# numbers
'Integer', 'Rational', 'Float',
# singleton symbols
'Exp1', 'Pi', 'ImaginaryUnit',
# symbols
'Symbol', 'Wild', 'Temporary',
# arithmetic operations
'Pow', 'Mul', 'Add',
# function values
'Derivative', 'Integral',
# defined singleton functions
'Abs', 'Sign', 'Sqrt',
'Floor', 'Ceiling',
'Re', 'Im', 'Arg',
'Conjugate',
'Exp', 'Log',
'Sin', 'Cos', 'Tan', 'Cot', 'ASin', 'ACos', 'ATan', 'ACot',
'Sinh', 'Cosh', 'Tanh', 'Coth', 'ASinh', 'ACosh', 'ATanh', 'ACoth',
'RisingFactorial', 'FallingFactorial',
'factorial', 'binomial',
'Gamma', 'LowerGamma', 'UpperGamma', 'PolyGamma',
'Erf',
# special polynomials
'Chebyshev', 'Chebyshev2',
# undefined functions
'Function', 'WildFunction',
# anonymous functions
'Lambda',
# Landau O symbol
'Order',
# relational operations
'Equality', 'Unequality', 'StrictGreaterThan', 'StrictLessThan',
'GreaterThan', 'LessThan',
]
class BasicType(type):
pass
class Registry(object):
"""
Base class for registry objects.
Registries map a name to an object using attribute notation. Registry
classes behave singletonically: all their instances share the same state,
which is stored in the class object.
All subclasses should set `__slots__ = []`.
"""
__slots__ = []
def __setattr__(self, name, obj):
setattr(self.__class__, name, obj)
def __delattr__(self, name):
delattr(self.__class__, name)
#A set containing all sympy class objects, kept in sync with C
all_classes = set()
class ClassRegistry(Registry):
"""
Namespace for SymPy classes
This is needed to avoid problems with cyclic imports.
To get a SymPy class, use `C.<class_name>` e.g. `C.Rational`, `C.Add`.
For performance reasons, this is coupled with a set `all_classes` holding
the classes, which should not be modified directly.
"""
__slots__ = []
def __setattr__(self, name, cls):
Registry.__setattr__(self, name, cls)
all_classes.add(cls)
def __delattr__(self, name):
cls = getattr(self, name)
Registry.__delattr__(self, name)
# The same class could have different names, so make sure
# it's really gone from C before removing it from all_classes.
if cls not in self.__class__.__dict__.itervalues():
all_classes.remove(cls)
C = ClassRegistry()
class BasicMeta(BasicType):
def __init__(cls, *args, **kws):
setattr(C, cls.__name__, cls)
def __cmp__(cls, other):
# If the other object is not a Basic subclass, then we are not equal to
# it.
if not isinstance(other, BasicType):
return -1
n1 = cls.__name__
n2 = other.__name__
if n1 == n2:
return 0
UNKNOWN = len(ordering_of_classes) + 1
try:
i1 = ordering_of_classes.index(n1)
except ValueError:
i1 = UNKNOWN
try:
i2 = ordering_of_classes.index(n2)
except ValueError:
i2 = UNKNOWN
if i1 == UNKNOWN and i2 == UNKNOWN:
return (n1 > n2) - (n1 < n2)
return (i1 > i2) - (i1 < i2)
def __lt__(cls, other):
if cls.__cmp__(other) == -1:
return True
return False
def __gt__(cls, other):
if cls.__cmp__(other) == 1:
return True
return False
C.BasicMeta = BasicMeta
| bsd-3-clause | -423,659,651,413,438,700 | 26.839416 | 80 | 0.579444 | false |
archilogic-com/three.js | utils/exporters/blender/addons/io_three/exporter/utilities.py | 1 | 2246 | import uuid
import hashlib
from .. import constants
ROUND = constants.DEFAULT_PRECISION
def bit_mask(flags):
"""Generate a bit mask.
:type flags: dict
:return: int
"""
bit = 0
true = lambda x, y: (x | (1 << y))
false = lambda x, y: (x & (~(1 << y)))
for mask, position in constants.MASK.items():
func = true if flags.get(mask) else false
bit = func(bit, position)
return bit
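# Illustrative example (assumes constants.MASK maps e.g. 'vertices' to bit 0 and
# 'normals' to bit 1): bit_mask({'vertices': True, 'normals': False}) == 0b01 == 1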
def hash(value):
"""Generate a hash from a given value
:param value:
:rtype: str
"""
hash_ = hashlib.md5()
hash_.update(repr(value).encode('utf8'))
return hash_.hexdigest()
def id():
"""Generate a random UUID
:rtype: str
"""
return str(uuid.uuid4()).upper()
def id_from_name(name):
"""Generate a UUID using a name as the namespace
:type name: str
:rtype: str
"""
return str(uuid.uuid3(uuid.NAMESPACE_DNS, name)).upper()
def rgb2int(rgb):
"""Convert a given rgb value to an integer
:type rgb: list|tuple
:rtype: int
"""
is_tuple = isinstance(rgb, tuple)
rgb = list(rgb) if is_tuple else rgb
colour = (int(rgb[0]*255) << 16) + (int(rgb[1]*255) << 8) + int(rgb[2]*255)
return colour
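# Example: rgb2int((1.0, 0.5, 0.0)) == (255 << 16) + (127 << 8) + 0 == 16744192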
def round_off(value, ndigits=ROUND):
"""Round off values to specified limit
:param value: value(s) to round off
:param ndigits: limit (Default value = ROUND)
:type value: float|list|tuple
:return: the same data type that was passed
:rtype: float|list|tuple
"""
is_tuple = isinstance(value, tuple)
is_list = isinstance(value, list)
value = list(value) if is_tuple else value
value = [value] if not is_list and not is_tuple else value
value = [round(val, ndigits) for val in value]
if is_tuple:
value = tuple(value)
elif not is_list:
value = value[0]
return value
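# Example: round_off(3.14159, ndigits=3) == 3.142 and
# round_off((1.23456, 2.34567), ndigits=2) == (1.23, 2.35); the input type is kept.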
def rounding(options):
"""By evaluation the options determine if precision was
enabled and what the value is
:type options: dict
:rtype: bool, int
"""
round_off_ = options.get(constants.ENABLE_PRECISION)
if round_off_:
round_val = options[constants.PRECISION]
else:
round_val = None
return (round_off_, round_val)
| mit | 2,366,144,468,725,731,300 | 18.876106 | 79 | 0.607747 | false |
j00bar/ansible | test/units/playbook/test_helpers.py | 60 | 19182 | # (c) 2016, Adrian Likins <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
from units.mock.loader import DictDataLoader
from ansible import errors
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role.include import RoleInclude
from ansible.playbook import helpers
class MixinForMocks(object):
def _setup(self):
# This is not a very good mixin, lots of side effects
self.fake_loader = DictDataLoader({'include_test.yml': "",
'other_include_test.yml': ""})
self.mock_tqm = MagicMock(name='MockTaskQueueManager')
self.mock_play = MagicMock(name='MockPlay')
self.mock_iterator = MagicMock(name='MockIterator')
self.mock_iterator._play = self.mock_play
self.mock_inventory = MagicMock(name='MockInventory')
self.mock_inventory._hosts_cache = dict()
def _get_host(host_name):
return None
self.mock_inventory.get_host.side_effect = _get_host
# TODO: can we use a real VariableManager?
self.mock_variable_manager = MagicMock(name='MockVariableManager')
self.mock_variable_manager.get_vars.return_value = dict()
self.mock_block = MagicMock(name='MockBlock')
self.fake_role_loader = DictDataLoader({"/etc/ansible/roles/bogus_role/tasks/main.yml": """
- shell: echo 'hello world'
"""})
self._test_data_path = os.path.dirname(__file__)
self.fake_include_loader = DictDataLoader({"/dev/null/includes/test_include.yml": """
- include: other_test_include.yml
- shell: echo 'hello world'
""",
"/dev/null/includes/static_test_include.yml": """
- include: other_test_include.yml
- shell: echo 'hello static world'
""",
"/dev/null/includes/other_test_include.yml": """
- debug:
msg: other_test_include_debug
"""})
class TestLoadListOfTasks(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def _assert_is_task_list(self, results):
for result in results:
self.assertIsInstance(result, Task)
def _assert_is_task_list_or_blocks(self, results):
self.assertIsInstance(results, list)
for result in results:
self.assertIsInstance(result, (Task, Block))
def test_ds_not_list(self):
ds = {}
self.assertRaises(AssertionError, helpers.load_list_of_tasks,
ds, self.mock_play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
def test_empty_task(self):
ds = [{}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"no action detected in task. This often indicates a misspelled module name, or incorrect module path",
helpers.load_list_of_tasks,
ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
def test_empty_task_use_handlers(self):
ds = [{}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"no action detected in task. This often indicates a misspelled module name, or incorrect module path",
helpers.load_list_of_tasks,
ds,
use_handlers=True,
play=self.mock_play,
variable_manager=self.mock_variable_manager,
loader=self.fake_loader)
def test_one_bogus_block(self):
ds = [{'block': None}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"A malformed block was encountered",
helpers.load_list_of_tasks,
ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
def test_unknown_action(self):
action_name = 'foo_test_unknown_action'
ds = [{'action': action_name}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self._assert_is_task_list_or_blocks(res)
self.assertEquals(res[0].action, action_name)
def test_block_unknown_action(self):
action_name = 'foo_test_block_unknown_action'
ds = [{
'block': [{'action': action_name}]
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self._assert_default_block(res[0])
def _assert_default_block(self, block):
# the expected defaults
self.assertIsInstance(block.block, list)
self.assertEquals(len(block.block), 1)
self.assertIsInstance(block.rescue, list)
self.assertEquals(len(block.rescue), 0)
self.assertIsInstance(block.always, list)
self.assertEquals(len(block.always), 0)
def test_block_unknown_action_use_handlers(self):
ds = [{
'block': [{'action': 'foo_test_block_unknown_action'}]
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self._assert_default_block(res[0])
def test_one_bogus_block_use_handlers(self):
ds = [{'block': True}]
self.assertRaisesRegexp(errors.AnsibleParserError,
"A malformed block was encountered",
helpers.load_list_of_tasks,
ds, play=self.mock_play, use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
def test_one_bogus_include(self):
ds = [{'include': 'somefile.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self.assertIsInstance(res, list)
self.assertEquals(len(res), 0)
def test_one_bogus_include_use_handlers(self):
ds = [{'include': 'somefile.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self.assertIsInstance(res, list)
self.assertEquals(len(res), 0)
def test_one_bogus_include_static(self):
ds = [{'include': 'somefile.yml',
'static': 'true'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_loader)
self.assertIsInstance(res, list)
self.assertEquals(len(res), 0)
def test_one_include(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self.assertEquals(len(res), 1)
self._assert_is_task_list_or_blocks(res)
def test_one_parent_include(self):
ds = [{'include': '/dev/null/includes/test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIsInstance(res[0]._parent, TaskInclude)
# TODO/FIXME: do this non deprecated way
def test_one_include_tags(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml',
'tags': ['test_one_include_tags_tag1', 'and_another_tagB']
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIn('test_one_include_tags_tag1', res[0].tags)
self.assertIn('and_another_tagB', res[0].tags)
# TODO/FIXME: do this non deprecated way
def test_one_parent_include_tags(self):
ds = [{'include': '/dev/null/includes/test_include.yml',
#'vars': {'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']}
'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']
}
]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIn('test_one_parent_include_tags_tag1', res[0].tags)
self.assertIn('and_another_tag2', res[0].tags)
# It would be useful to be able to tell what kind of deprecation we encountered and where we encountered it.
def test_one_include_tags_deprecated_mixed(self):
ds = [{'include': "/dev/null/includes/other_test_include.yml",
'vars': {'tags': "['tag_on_include1', 'tag_on_include2']"},
'tags': 'mixed_tag1, mixed_tag2'
}]
self.assertRaisesRegexp(errors.AnsibleParserError, 'Mixing styles',
helpers.load_list_of_tasks,
ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
def test_one_include_tags_deprecated_include(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml',
'vars': {'tags': ['include_tag1_deprecated', 'and_another_tagB_deprecated']}
}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Block)
self.assertIn('include_tag1_deprecated', res[0].tags)
self.assertIn('and_another_tagB_deprecated', res[0].tags)
def test_one_include_use_handlers(self):
ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Handler)
def test_one_parent_include_use_handlers(self):
ds = [{'include': '/dev/null/includes/test_include.yml'}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
use_handlers=True,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Handler)
# default for Handler
self.assertEquals(res[0].listen, None)
    # TODO/FIXME: this doesn't seem right
# figure out how to get the non-static errors to be raised, this seems to just ignore everything
def test_one_include_not_static(self):
ds = [{
'include': '/dev/null/includes/static_test_include.yml',
'static': False
}]
#a_block = Block()
ti_ds = {'include': '/dev/null/includes/ssdftatic_test_include.yml'}
a_task_include = TaskInclude()
ti = a_task_include.load(ti_ds)
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
block=ti,
variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
self._assert_is_task_list_or_blocks(res)
self.assertIsInstance(res[0], Task)
self.assertEquals(res[0].args['_raw_params'], '/dev/null/includes/static_test_include.yml')
# TODO/FIXME: This two get stuck trying to make a mock_block into a TaskInclude
# def test_one_include(self):
# ds = [{'include': 'other_test_include.yml'}]
# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
# block=self.mock_block,
# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
# print(res)
# def test_one_parent_include(self):
# ds = [{'include': 'test_include.yml'}]
# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
# block=self.mock_block,
# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
# print(res)
def test_one_bogus_include_role(self):
ds = [{'include_role': {'name': 'bogus_role'}}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play,
block=self.mock_block,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
self.assertEquals(len(res), 1)
self._assert_is_task_list_or_blocks(res)
def test_one_bogus_include_role_use_handlers(self):
ds = [{'include_role': {'name': 'bogus_role'}}]
res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
block=self.mock_block,
variable_manager=self.mock_variable_manager,
loader=self.fake_role_loader)
self.assertEquals(len(res), 1)
self._assert_is_task_list_or_blocks(res)
class TestLoadListOfRoles(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def test_ds_not_list(self):
ds = {}
self.assertRaises(AssertionError, helpers.load_list_of_roles,
ds, self.mock_play)
def test_empty_role(self):
ds = [{}]
self.assertRaisesRegexp(errors.AnsibleError,
"role definitions must contain a role name",
helpers.load_list_of_roles,
ds, self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
def test_empty_role_just_name(self):
ds = [{'name': 'bogus_role'}]
res = helpers.load_list_of_roles(ds, self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
self.assertIsInstance(res, list)
for r in res:
self.assertIsInstance(r, RoleInclude)
def test_block_unknown_action(self):
ds = [{
'block': [{'action': 'foo_test_block_unknown_action'}]
}]
ds = [{'name': 'bogus_role'}]
res = helpers.load_list_of_roles(ds, self.mock_play,
variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
self.assertIsInstance(res, list)
for r in res:
self.assertIsInstance(r, RoleInclude)
class TestLoadListOfBlocks(unittest.TestCase, MixinForMocks):
def setUp(self):
self._setup()
def test_ds_not_list(self):
ds = {}
mock_play = MagicMock(name='MockPlay')
self.assertRaises(AssertionError, helpers.load_list_of_blocks,
ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
def test_empty_block(self):
ds = [{}]
mock_play = MagicMock(name='MockPlay')
self.assertRaisesRegexp(errors.AnsibleParserError,
"no action detected in task. This often indicates a misspelled module name, or incorrect module path",
helpers.load_list_of_blocks,
ds, mock_play,
parent_block=None,
role=None,
task_include=None,
use_handlers=False,
variable_manager=None,
loader=None)
def test_block_unknown_action(self):
ds = [{'action': 'foo'}]
mock_play = MagicMock(name='MockPlay')
res = helpers.load_list_of_blocks(ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None,
loader=None)
self.assertIsInstance(res, list)
for block in res:
self.assertIsInstance(block, Block)
| gpl-3.0 | -8,338,170,430,065,940,000 | 47.31738 | 148 | 0.564435 | false |
SGCreations/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/jinja2/testsuite/security.py | 415 | 6204 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~
Checks the sandbox and other security features.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment
from jinja2.sandbox import SandboxedEnvironment, \
ImmutableSandboxedEnvironment, unsafe
from jinja2 import Markup, escape
from jinja2.exceptions import SecurityError, TemplateSyntaxError, \
TemplateRuntimeError
from jinja2._compat import text_type
class PrivateStuff(object):
def bar(self):
return 23
@unsafe
def foo(self):
return 42
def __repr__(self):
return 'PrivateStuff'
class PublicStuff(object):
bar = lambda self: 23
_foo = lambda self: 42
def __repr__(self):
return 'PublicStuff'
class SandboxTestCase(JinjaTestCase):
def test_unsafe(self):
env = SandboxedEnvironment()
self.assert_raises(SecurityError, env.from_string("{{ foo.foo() }}").render,
foo=PrivateStuff())
self.assert_equal(env.from_string("{{ foo.bar() }}").render(foo=PrivateStuff()), '23')
self.assert_raises(SecurityError, env.from_string("{{ foo._foo() }}").render,
foo=PublicStuff())
self.assert_equal(env.from_string("{{ foo.bar() }}").render(foo=PublicStuff()), '23')
self.assert_equal(env.from_string("{{ foo.__class__ }}").render(foo=42), '')
self.assert_equal(env.from_string("{{ foo.func_code }}").render(foo=lambda:None), '')
# security error comes from __class__ already.
self.assert_raises(SecurityError, env.from_string(
"{{ foo.__class__.__subclasses__() }}").render, foo=42)
def test_immutable_environment(self):
env = ImmutableSandboxedEnvironment()
self.assert_raises(SecurityError, env.from_string(
'{{ [].append(23) }}').render)
self.assert_raises(SecurityError, env.from_string(
'{{ {1:2}.clear() }}').render)
def test_restricted(self):
env = SandboxedEnvironment()
self.assert_raises(TemplateSyntaxError, env.from_string,
"{% for item.attribute in seq %}...{% endfor %}")
self.assert_raises(TemplateSyntaxError, env.from_string,
"{% for foo, bar.baz in seq %}...{% endfor %}")
def test_markup_operations(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
# string interpolations are safe to use too
assert Markup('<em>%s</em>') % '<bad user>' == \
            '<em>&lt;bad user&gt;</em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
        } == '<em>&lt;bad user&gt;</em>'
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_template_data(self):
env = Environment(autoescape=True)
t = env.from_string('{% macro say_hello(name) %}'
'<p>Hello {{ name }}!</p>{% endmacro %}'
'{{ say_hello("<blink>foo</blink>") }}')
        escaped_out = '<p>Hello &lt;blink&gt;foo&lt;/blink&gt;!</p>'
assert t.render() == escaped_out
assert text_type(t.module) == escaped_out
assert escape(t.module) == escaped_out
assert t.module.say_hello('<blink>foo</blink>') == escaped_out
assert escape(t.module.say_hello('<blink>foo</blink>')) == escaped_out
def test_attr_filter(self):
env = SandboxedEnvironment()
tmpl = env.from_string('{{ cls|attr("__subclasses__")() }}')
self.assert_raises(SecurityError, tmpl.render, cls=int)
def test_binary_operator_intercepting(self):
def disable_op(left, right):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('1 + 2', {}, '3'), ('a + 2', {'a': 2}, '4'):
env = SandboxedEnvironment()
env.binop_table['+'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_binops = frozenset(['+'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
self.fail('expected runtime error')
def test_unary_operator_intercepting(self):
def disable_op(arg):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('-1', {}, '-1'), ('-a', {'a': 2}, '-2'):
env = SandboxedEnvironment()
env.unop_table['-'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_unops = frozenset(['-'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
self.fail('expected runtime error')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SandboxTestCase))
return suite
| apache-2.0 | -5,241,616,003,818,179,000 | 36.373494 | 94 | 0.553997 | false |
jnerin/ansible | lib/ansible/module_utils/ovirt.py | 3 | 26081 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import collections
import inspect
import os
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from distutils.version import LooseVersion
try:
from enum import Enum # enum is a ovirtsdk4 requirement
import ovirtsdk4 as sdk
import ovirtsdk4.version as sdk_version
HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.0.0')
except ImportError:
HAS_SDK = False
BYTES_MAP = {
'kib': 2**10,
'mib': 2**20,
'gib': 2**30,
'tib': 2**40,
'pib': 2**50,
}
def check_sdk(module):
if not HAS_SDK:
module.fail_json(
msg='ovirtsdk4 version 4.0.0 or higher is required for this module'
)
def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
"""
Convert SDK Struct type into dictionary.
"""
res = {}
def remove_underscore(val):
if val.startswith('_'):
val = val[1:]
            val = remove_underscore(val)
return val
def convert_value(value):
nested = False
if isinstance(value, sdk.Struct):
return get_dict_of_struct(value)
elif isinstance(value, Enum) or isinstance(value, datetime):
return str(value)
elif isinstance(value, list) or isinstance(value, sdk.List):
if isinstance(value, sdk.List) and fetch_nested and value.href:
try:
value = connection.follow_link(value)
nested = True
except sdk.Error:
value = []
ret = []
for i in value:
if isinstance(i, sdk.Struct):
if not nested:
ret.append(get_dict_of_struct(i))
else:
nested_obj = dict(
(attr, convert_value(getattr(i, attr)))
for attr in attributes if getattr(i, attr, None)
)
                        nested_obj['id'] = getattr(i, 'id', None)
ret.append(nested_obj)
elif isinstance(i, Enum):
ret.append(str(i))
else:
ret.append(i)
return ret
else:
return value
if struct is not None:
for key, value in struct.__dict__.items():
if value is None:
continue
key = remove_underscore(key)
res[key] = convert_value(value)
return res
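# Illustrative usage: get_dict_of_struct(vm, connection, fetch_nested=True,
# attributes=['name']) returns a plain dict of the SDK object; with fetch_nested
# set, linked collections are resolved into dicts of the requested attributes
# plus 'id'.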
def engine_version(connection):
"""
Return string representation of oVirt engine version.
"""
engine_api = connection.system_service().get()
engine_version = engine_api.product_info.version
return '%s.%s' % (engine_version.major, engine_version.minor)
def create_connection(auth):
"""
Create a connection to Python SDK, from task `auth` parameter.
    If the user doesn't have an SSO token, the `auth` dictionary has the following mandatory parameters:
    url, username, password
    If the user has an SSO token, the `auth` dictionary has the following mandatory parameters:
    url, token
    The `ca_file` parameter is mandatory when a secure connection is used; to use an
    insecure connection it's mandatory to send insecure=True.
:param auth: dictionary which contains needed values for connection creation
:return: Python SDK connection
"""
return sdk.Connection(
url=auth.get('url'),
username=auth.get('username'),
password=auth.get('password'),
ca_file=auth.get('ca_file', None),
insecure=auth.get('insecure', False),
token=auth.get('token', None),
kerberos=auth.get('kerberos', None),
headers=auth.get('headers', None),
)
def convert_to_bytes(param):
"""
This method convert units to bytes, which follow IEC standard.
:param param: value to be converted
"""
if param is None:
return None
# Get rid of whitespaces:
param = ''.join(param.split())
# Convert to bytes:
if param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
elif param.isdigit():
return int(param) * 2**10
else:
raise ValueError(
"Unsupported value(IEC supported): '{value}'".format(value=param)
)
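# Example: convert_to_bytes('1 GiB') == 2**30, while convert_to_bytes('1024') ==
# 1024 * 2**10, because a bare number is treated as KiB.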
def follow_link(connection, link):
"""
This method returns the entity of the element which link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: entity which link points to
"""
if link:
return connection.follow_link(link)
else:
return None
def get_link_name(connection, link):
"""
This method returns the name of the element which link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: name of the entity, which link points to
"""
if link:
return connection.follow_link(link).name
else:
return None
def equal(param1, param2, ignore_case=False):
"""
Compare two parameters and return if they are equal.
    The comparison isn't run if the first parameter is None.
    With this approach we don't run the comparison when the user doesn't
    specify the parameter in their task.
:param param1: user inputted parameter
:param param2: value of entity parameter
:return: True if parameters are equal or first parameter is None, otherwise False
"""
if param1 is not None:
if ignore_case:
return param1.lower() == param2.lower()
return param1 == param2
return True
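# Example: equal(None, 'anything') is True (parameter not given by the user),
# equal('Down', 'down', ignore_case=True) is True, equal('a', 'b') is False.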
def search_by_attributes(service, list_params=None, **kwargs):
"""
Search for the entity by attributes. Nested entities don't support search
    via REST, so when searching for a nested entity we return all entities
    and filter them by the specified attributes.
"""
list_params = list_params or {}
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
search=' and '.join('{0}={1}'.format(k, v) for k, v in kwargs.items()),
**list_params
)
else:
res = [
e for e in service.list(**list_params) if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
def search_by_name(service, name, **kwargs):
"""
Search for the entity by its name. Nested entities don't support search
    via REST, so when searching for a nested entity we return all entities
and filter them by name.
:param service: service of the entity
:param name: name of the entity
:return: Entity object returned by Python SDK
"""
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
search="name={name}".format(name=name)
)
else:
res = [e for e in service.list() if e.name == name]
if kwargs:
res = [
e for e in service.list() if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
def get_entity(service, get_params=None):
"""
Ignore SDK Error in case of getting an entity from service.
"""
entity = None
try:
if get_params is not None:
entity = service.get(**get_params)
else:
entity = service.get()
except sdk.Error:
# We can get here 404, we should ignore it, in case
# of removing entity for example.
pass
return entity
def get_id_by_name(service, name, raise_error=True, ignore_case=False):
"""
    Search for an entity ID by its name.
"""
entity = search_by_name(service, name)
if entity is not None:
return entity.id
if raise_error:
raise Exception("Entity '%s' was not found." % name)
def wait(
service,
condition,
fail_condition=lambda e: False,
timeout=180,
wait=True,
poll_interval=3,
):
"""
Wait until entity fulfill expected condition.
:param service: service of the entity
:param condition: condition to be fulfilled
:param fail_condition: if this condition is true, raise Exception
:param timeout: max time to wait in seconds
:param wait: if True wait for condition, if False don't wait
:param poll_interval: Number of seconds we should wait until next condition check
"""
# Wait until the desired state of the entity:
if wait:
start = time.time()
while time.time() < start + timeout:
# Exit if the condition of entity is valid:
entity = get_entity(service)
if condition(entity):
return
elif fail_condition(entity):
raise Exception("Error while waiting on result state of the entity.")
# Sleep for `poll_interval` seconds if none of the conditions apply:
time.sleep(float(poll_interval))
raise Exception("Timeout exceed while waiting on result state of the entity.")
def __get_auth_dict():
OVIRT_URL = os.environ.get('OVIRT_URL')
OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
OVIRT_INSECURE = OVIRT_CAFILE is None
env_vars = None
if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
env_vars = {
'url': OVIRT_URL,
'username': OVIRT_USERNAME,
'password': OVIRT_PASSWORD,
'insecure': OVIRT_INSECURE,
'token': OVIRT_TOKEN,
'ca_file': OVIRT_CAFILE,
}
if env_vars is not None:
auth = dict(default=env_vars, type='dict')
else:
auth = dict(required=True, type='dict')
return auth
def ovirt_facts_full_argument_spec(**kwargs):
"""
Extend parameters of facts module with parameters which are common to all
oVirt facts modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
def ovirt_full_argument_spec(**kwargs):
"""
Extend parameters of module with parameters which are common to all oVirt modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
timeout=dict(default=180, type='int'),
wait=dict(default=True, type='bool'),
poll_interval=dict(default=3, type='int'),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
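# Illustrative usage inside a module (names assumed):
#   argument_spec = ovirt_full_argument_spec(
#       state=dict(choices=['present', 'absent'], default='present'),
#       name=dict(default=None),
#   )
#   module = AnsibleModule(argument_spec=argument_spec)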
def check_params(module):
"""
Most modules must have either `name` or `id` specified.
"""
if module.params.get('name') is None and module.params.get('id') is None:
module.fail_json(msg='"name" or "id" is required')
def engine_supported(connection, version):
return LooseVersion(engine_version(connection)) >= LooseVersion(version)
def check_support(version, connection, module, params):
"""
Check if parameters used by user are supported by oVirt Python SDK
and oVirt engine.
"""
api_version = LooseVersion(engine_version(connection))
version = LooseVersion(version)
for param in params:
if module.params.get(param) is not None:
return LooseVersion(sdk_version.VERSION) >= version and api_version >= version
return True
class BaseModule(object):
"""
    This is the base class for oVirt modules. oVirt modules should inherit this
    class and override its methods to customize the specific needs of the module.
    The only abstract method of this class is `build_entity`, which must
    be implemented in the child class.
"""
__metaclass__ = ABCMeta
def __init__(self, connection, module, service, changed=False):
self._connection = connection
self._module = module
self._service = service
self._changed = changed
self._diff = {'after': dict(), 'before': dict()}
@property
def changed(self):
return self._changed
@changed.setter
def changed(self, changed):
if not self._changed:
self._changed = changed
@abstractmethod
def build_entity(self):
"""
This method should return oVirt Python SDK type, which we want to
create or update, initialized by values passed by Ansible module.
For example if we want to create VM, we will return following:
types.Vm(name=self._module.params['vm_name'])
:return: Specific instance of sdk.Struct.
"""
pass
def param(self, name, default=None):
"""
Return a module parameter specified by it's name.
"""
return self._module.params.get(name, default)
def update_check(self, entity):
"""
        This method checks whether the entity values are the same as the values
        passed to the Ansible module. By default we don't compare any values.
:param entity: Entity we want to compare with Ansible module values.
:return: True if values are same, so we don't need to update the entity.
"""
return True
def pre_create(self, entity):
"""
This method is called right before entity is created.
:param entity: Entity to be created or updated.
"""
pass
def post_create(self, entity):
"""
This method is called right after entity is created.
:param entity: Entity which was created.
"""
pass
def post_update(self, entity):
"""
This method is called right after entity is updated.
:param entity: Entity which was updated.
"""
pass
def diff_update(self, after, update):
for k, v in update.items():
if isinstance(v, collections.Mapping):
after[k] = self.diff_update(after.get(k, dict()), v)
else:
after[k] = update[k]
return after
def create(
self,
entity=None,
result_state=None,
fail_condition=lambda e: False,
search_params=None,
update_params=None,
**kwargs
):
"""
        Method which is called when the state of the entity is 'present'. If the
        user doesn't provide the `entity` parameter, the entity is searched for using
        the `search_params` parameter. If the entity is found it's updated; whether
        the entity should be updated is checked by the `update_check` method.
The corresponding updated entity is build by `build_entity` method.
Function executed after entity is created can optionally be specified
in `post_create` parameter. Function executed after entity is updated
can optionally be specified in `post_update` parameter.
:param entity: Entity we want to update, if exists.
:param result_state: State which should entity has in order to finish task.
:param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
:param search_params: Dictionary of parameters to be used for search.
:param update_params: The params which should be passed to update method.
:param kwargs: Additional parameters passed when creating entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
self.pre_create(entity)
if entity:
# Entity exists, so update it:
entity_service = self._service.service(entity.id)
if not self.update_check(entity):
new_entity = self.build_entity()
if not self._module.check_mode:
update_params = update_params or {}
updated_entity = entity_service.update(
new_entity,
**update_params
)
self.post_update(entity)
# Update diffs only if user specified --diff parameter,
                # so we don't uselessly overload the API:
if self._module._diff:
before = get_dict_of_struct(
entity,
self._connection,
fetch_nested=True,
attributes=['name'],
)
after = before.copy()
self.diff_update(after, get_dict_of_struct(new_entity))
self._diff['before'] = before
self._diff['after'] = after
self.changed = True
else:
        # Entity doesn't exist, so create it:
if not self._module.check_mode:
entity = self._service.add(
self.build_entity(),
**kwargs
)
self.post_create(entity)
self.changed = True
# Wait for the entity to be created and to be in the defined state:
entity_service = self._service.service(entity.id)
def state_condition(entity):
return entity
if result_state:
def state_condition(entity):
return entity and entity.status == result_state
wait(
service=entity_service,
condition=state_condition,
fail_condition=fail_condition,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def pre_remove(self, entity):
"""
This method is called right before entity is removed.
:param entity: Entity which we want to remove.
"""
pass
def entity_name(self, entity):
return "{e_type} '{e_name}'".format(
e_type=type(entity).__name__.lower(),
e_name=getattr(entity, 'name', None),
)
def remove(self, entity=None, search_params=None, **kwargs):
"""
        Method which is called when the state of the entity is 'absent'. If the
        user doesn't provide the `entity` parameter, the entity is searched for using
        the `search_params` parameter. If the entity is found it's removed.
Function executed before remove is executed can optionally be specified
in `pre_remove` parameter.
:param entity: Entity we want to remove.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed when removing entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
if entity is None:
return {
'changed': self.changed,
'msg': "Entity wasn't found."
}
self.pre_remove(entity)
entity_service = self._service.service(entity.id)
if not self._module.check_mode:
entity_service.remove(**kwargs)
wait(
service=entity_service,
condition=lambda entity: not entity,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
self.changed = True
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
}
def action(
self,
action,
entity=None,
action_condition=lambda e: e,
wait_condition=lambda e: e,
fail_condition=lambda e: False,
pre_action=lambda e: e,
post_action=lambda e: None,
search_params=None,
**kwargs
):
"""
This method is executed when we want to change the state of some oVirt
        entity. The action to be executed on the oVirt service is specified by the
        `action` parameter. Whether the action should be executed can be
        controlled by passing the `action_condition` parameter. The state which the
        entity should be in after execution of the action can be specified
        by the `wait_condition` parameter.
Function executed before an action on entity can optionally be specified
in `pre_action` parameter. Function executed after an action on entity can
optionally be specified in `post_action` parameter.
:param action: Action which should be executed by service on entity.
:param entity: Entity we want to run action on.
:param action_condition: Function which is executed when checking if action should be executed.
        :param fail_condition: Function which checks for an incorrect entity state; if it returns `True`, an Exception is raised.
:param wait_condition: Function which is executed when waiting on result state.
:param pre_action: Function which is executed before running the action.
:param post_action: Function which is executed after running the action.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed to action.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
entity = pre_action(entity)
if entity is None:
self._module.fail_json(
msg="Entity not found, can't run action '{0}'.".format(
action
)
)
entity_service = self._service.service(entity.id)
entity = entity_service.get()
if action_condition(entity):
if not self._module.check_mode:
getattr(entity_service, action)(**kwargs)
self.changed = True
post_action(entity)
wait(
service=self._service.service(entity.id),
condition=wait_condition,
fail_condition=fail_condition,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def wait_for_import(self, condition=lambda e: True):
if self._module.params['wait']:
start = time.time()
timeout = self._module.params['timeout']
poll_interval = self._module.params['poll_interval']
while time.time() < start + timeout:
entity = self.search_entity()
if entity and condition(entity):
return entity
time.sleep(poll_interval)
def search_entity(self, search_params=None, list_params=None):
"""
        Always try to search by `id` first; if no ID is specified,
        check whether the user constructed a special search in `search_params`;
        otherwise fall back to searching by `name`.
"""
entity = None
if 'id' in self._module.params and self._module.params['id'] is not None:
entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
elif search_params is not None:
entity = search_by_attributes(self._service, list_params=list_params, **search_params)
elif self._module.params.get('name') is not None:
entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])
return entity
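# --- Illustrative sketch only; not part of the original module_utils. ---
# A hypothetical Ansible module built on top of this helper class would
# typically dispatch on the requested state and feed the returned dict
# straight into exit_json(). `EntityModule`, `module`, `connection` and
# `service` below are assumed names, not definitions from this file.
#
#   entity_module = EntityModule(connection=connection, module=module, service=service)
#   if module.params['state'] == 'present':
#       ret = entity_module.create()   # also covers the update path shown above
#   else:
#       ret = entity_module.remove()
#   # ret carries 'changed', 'id', the entity rendered as a dict and 'diff'.
#   module.exit_json(**ret)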
| gpl-3.0 | -4,524,963,097,820,722,700 | 32.437179 | 121 | 0.595721 | false |
lenstr/rethinkdb | test/rql_test/connections/http_support/werkzeug/testsuite/multipart/collect.py | 248 | 1584 | #!/usr/bin/env python
"""
Hacky helper application to collect form data.
"""
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
def copy_stream(request):
from os import mkdir
from time import time
folder = 'request-%d' % time()
mkdir(folder)
environ = request.environ
f = open(folder + '/request.txt', 'wb+')
f.write(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))
f.flush()
f.seek(0)
environ['wsgi.input'] = f
request.stat_folder = folder
def stats(request):
copy_stream(request)
f1 = request.files['file1']
f2 = request.files['file2']
text = request.form['text']
f1.save(request.stat_folder + '/file1.bin')
f2.save(request.stat_folder + '/file2.bin')
open(request.stat_folder + '/text.txt', 'w').write(text.encode('utf-8'))
return Response('Done.')
def upload_file(request):
return Response('''
<h1>Upload File</h1>
<form action="" method="post" enctype="multipart/form-data">
<input type="file" name="file1"><br>
<input type="file" name="file2"><br>
<textarea name="text"></textarea><br>
<input type="submit" value="Send">
</form>
''', mimetype='text/html')
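# --- Usage sketch (illustrative only; not part of the original helper). ---
# The collector can also be exercised without a browser via Werkzeug's test
# client; the field names mirror the form rendered by upload_file() above and
# are posted to application() defined below.
def _example_post_with_test_client():
    from StringIO import StringIO
    from werkzeug.test import Client
    client = Client(application, Response)
    # Simulate the browser form: two file fields plus a text field.
    return client.post('/', data={
        'file1': (StringIO('abc'), 'file1.bin'),
        'file2': (StringIO('xyz'), 'file2.bin'),
        'text': u'hello',
    })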
def application(environ, start_response):
request = Request(environ)
if request.method == 'POST':
response = stats(request)
else:
response = upload_file(request)
    return response(environ, start_response)
if __name__ == '__main__':
run_simple('localhost', 5000, application, use_debugger=True)
| agpl-3.0 | 3,515,731,322,545,033,000 | 27.285714 | 76 | 0.636364 | false |
philippjfr/bokeh | bokeh/embed/tests/test_server.py | 1 | 11784 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
from bokeh.util.api import INTERNAL, PUBLIC ; INTERNAL, PUBLIC
from bokeh.util.testing import verify_api ; verify_api
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import bs4
# Bokeh imports
# Module under test
import bokeh.embed.server as bes
#-----------------------------------------------------------------------------
# API Definition
#-----------------------------------------------------------------------------
api = {
PUBLIC: (
( 'server_document', (1, 0, 0) ),
( 'server_session', (1, 0, 0) ),
), INTERNAL: (
( 'server_html_page_for_session', (1, 0, 0) ),
)
}
Test_api = verify_api(bes, api)
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
@pytest.fixture
def test_plot():
from bokeh.plotting import figure
test_plot = figure()
test_plot.circle([1, 2], [2, 3])
return test_plot
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
class TestServerDocument(object):
def test_invalid_resources_param(self):
with pytest.raises(ValueError):
bes.server_document(url="http://localhost:8081/foo/bar/sliders", resources=123)
with pytest.raises(ValueError):
bes.server_document(url="http://localhost:8081/foo/bar/sliders", resources="whatever")
def test_resources_default_is_implicit(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders", resources="default")
assert 'resources=' not in r
def test_resources_none(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders", resources=None)
assert 'resources=none' in r
def test_general(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders")
assert 'bokeh-app-path=/foo/bar/sliders' in r
assert 'bokeh-absolute-url=http://localhost:8081/foo/bar/sliders' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
        assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders&bokeh-absolute-url=%s" % \
("http://localhost:8081/foo/bar/sliders", divid, "http://localhost:8081/foo/bar/sliders")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src }
def test_script_attrs_arguments_provided(self):
r = bes.server_document(arguments=dict(foo=10))
assert 'foo=10' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s&foo=10" % \
("http://localhost:5006", divid, "http://localhost:5006")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src }
def test_script_attrs_url_provided_absolute_resources(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders")
assert 'bokeh-app-path=/foo/bar/sliders' in r
assert 'bokeh-absolute-url=http://localhost:8081/foo/bar/sliders' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders&bokeh-absolute-url=%s" % \
("http://localhost:8081/foo/bar/sliders", divid, "http://localhost:8081/foo/bar/sliders")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src }
def test_script_attrs_url_provided(self):
r = bes.server_document(url="http://localhost:8081/foo/bar/sliders", relative_urls=True)
assert 'bokeh-app-path=/foo/bar/sliders' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders" % \
("http://localhost:8081/foo/bar/sliders", divid)
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src }
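# --- Illustrative usage sketch (not part of the test suite). ---
# server_document() returns a <script> tag as a string; a hypothetical web
# view could interpolate it straight into a page template, e.g.:
#
#   from bokeh.embed import server_document
#   script = server_document(url="http://localhost:5006/sliders")
#   page = u"<html><body>%s</body></html>" % script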
class TestServerSession(object):
def test_return_type(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession')
assert isinstance(r, str)
def test_script_attrs_session_id_provided(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession')
assert 'bokeh-session-id=fakesession' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s&bokeh-session-id=fakesession" % \
("http://localhost:5006", divid, "http://localhost:5006")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : str(test_plot._id),
'id' : divid,
'src' : src }
def test_invalid_resources_param(self, test_plot):
with pytest.raises(ValueError):
bes.server_session(test_plot, session_id='fakesession', resources=123)
with pytest.raises(ValueError):
bes.server_session(test_plot, session_id='fakesession', resources="whatever")
def test_resources_default_is_implicit(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession', resources="default")
assert 'resources=' not in r
def test_resources_none(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession', resources=None)
assert 'resources=none' in r
def test_general(self, test_plot):
r = bes.server_session(test_plot, session_id='fakesession')
assert 'bokeh-session-id=fakesession' in r
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
assert len(scripts) == 1
attrs = scripts[0].attrs
        assert set(attrs) == set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
])
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s&bokeh-session-id=fakesession" % \
("http://localhost:5006", divid, "http://localhost:5006")
assert attrs == { 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : str(test_plot._id),
'id' : divid,
'src' : src }
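# --- Illustrative usage sketch (not part of the test suite). ---
# server_session() is normally paired with a session pulled from a running
# Bokeh server; a hypothetical view might do something along these lines:
#
#   from bokeh.client import pull_session
#   from bokeh.embed import server_session
#   session = pull_session(url="http://localhost:5006/sliders")
#   script = server_session(session_id=session.id, url="http://localhost:5006/sliders")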
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test__clean_url(object):
def test_default(self):
assert bes._clean_url("default") == bes.DEFAULT_SERVER_HTTP_URL.rstrip("/")
def test_bad_ws(self):
with pytest.raises(ValueError):
bes._clean_url("ws://foo")
def test_arg(self):
assert bes._clean_url("http://foo/bar") == "http://foo/bar"
assert bes._clean_url("http://foo/bar/") == "http://foo/bar"
class Test__get_app_path(object):
def test_arg(self):
assert bes._get_app_path("foo") == "/foo"
assert bes._get_app_path("http://foo") == "/"
assert bes._get_app_path("http://foo/bar") == "/bar"
assert bes._get_app_path("https://foo") == "/"
assert bes._get_app_path("https://foo/bar") == "/bar"
class Test__process_arguments(object):
def test_None(self):
assert bes._process_arguments(None) == ""
def test_args(self):
args = dict(foo=10, bar="baz")
r = bes._process_arguments(args)
# order unspecified
assert r == "&foo=10&bar=baz" or r == "&bar=baz&foo=10"
def test_args_ignores_bokeh_prefixed(self):
args = dict(foo=10, bar="baz")
args["bokeh-junk"] = 20
r = bes._process_arguments(args)
# order unspecified
assert r == "&foo=10&bar=baz" or r == "&bar=baz&foo=10"
class Test__process_app_path(object):
def test_root(self):
assert bes._process_app_path("/") == ""
def test_arg(self):
assert bes._process_app_path("/stuff") == "&bokeh-app-path=/stuff"
class Test__process_relative_urls(object):
def test_True(self):
assert bes._process_relative_urls(True, "") == ""
assert bes._process_relative_urls(True, "/stuff") == ""
    def test_False(self):
assert bes._process_relative_urls(False, "/stuff") == "&bokeh-absolute-url=/stuff"
class Test__process_resources(object):
def test_bad_input(self):
with pytest.raises(ValueError):
bes._process_resources("foo")
def test_None(self):
assert bes._process_resources(None) == "&resources=none"
def test_default(self):
assert bes._process_resources("default") == ""
class Test__process_session_id(object):
def test_arg(self):
assert bes._process_session_id("foo123") == "&bokeh-session-id=foo123"
class Test__src_path(object):
    def test_args(self):
        assert bes._src_path("http://foo", "1234") == "http://foo/autoload.js?bokeh-autoload-element=1234"
| bsd-3-clause | -8,731,078,012,785,231,000 | 36.056604 | 114 | 0.506365 | false |
google/gfw-toolkit | toolkit/third_party/oauth2client/client.py | 122 | 44282 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth 2.0 client.
Tools for interacting with OAuth 2.0 protected resources.
"""
__author__ = '[email protected] (Joe Gregorio)'
import base64
import clientsecrets
import copy
import datetime
import httplib2
import logging
import os
import sys
import time
import urllib
import urlparse
from oauth2client import GOOGLE_AUTH_URI
from oauth2client import GOOGLE_REVOKE_URI
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client import util
from oauth2client.anyjson import simplejson
HAS_OPENSSL = False
HAS_CRYPTO = False
try:
from oauth2client import crypt
HAS_CRYPTO = True
if crypt.OpenSSLVerifier is not None:
HAS_OPENSSL = True
except ImportError:
pass
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)
# Expiry is stored in RFC3339 UTC format
EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Which certs to use to validate id_tokens received.
ID_TOKEN_VERIFICATON_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'
# Constant to use for the out of band OAuth 2.0 flow.
OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'
# Google Data client libraries may need to set this to [401, 403].
REFRESH_STATUS_CODES = [401]
class Error(Exception):
"""Base error for this module."""
class FlowExchangeError(Error):
"""Error trying to exchange an authorization grant for an access token."""
class AccessTokenRefreshError(Error):
"""Error trying to refresh an expired access token."""
class TokenRevokeError(Error):
"""Error trying to revoke a token."""
class UnknownClientSecretsFlowError(Error):
"""The client secrets file called for an unknown type of OAuth 2.0 flow. """
class AccessTokenCredentialsError(Error):
"""Having only the access_token means no refresh is possible."""
class VerifyJwtTokenError(Error):
"""Could on retrieve certificates for validation."""
class NonAsciiHeaderError(Error):
"""Header names and values must be ASCII strings."""
def _abstract():
raise NotImplementedError('You need to override this function')
class MemoryCache(object):
"""httplib2 Cache implementation which only caches locally."""
def __init__(self):
self.cache = {}
def get(self, key):
return self.cache.get(key)
def set(self, key, value):
self.cache[key] = value
def delete(self, key):
self.cache.pop(key, None)
class Credentials(object):
"""Base class for all Credentials objects.
Subclasses must define an authorize() method that applies the credentials to
an HTTP transport.
Subclasses must also specify a classmethod named 'from_json' that takes a JSON
  string as input and returns an instantiated Credentials object.
"""
NON_SERIALIZED_MEMBERS = ['store']
def authorize(self, http):
"""Take an httplib2.Http instance (or equivalent) and authorizes it.
Authorizes it for the set of credentials, usually by replacing
http.request() with a method that adds in the appropriate headers and then
delegates to the original Http.request() method.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
"""
_abstract()
def refresh(self, http):
"""Forces a refresh of the access_token.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
"""
_abstract()
def revoke(self, http):
"""Revokes a refresh_token and makes the credentials void.
Args:
http: httplib2.Http, an http object to be used to make the revoke
request.
"""
_abstract()
def apply(self, headers):
"""Add the authorization to the headers.
Args:
headers: dict, the headers to add the Authorization header to.
"""
_abstract()
def _to_json(self, strip):
"""Utility function that creates JSON repr. of a Credentials object.
Args:
strip: array, An array of names of members to not include in the JSON.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
t = type(self)
d = copy.copy(self.__dict__)
for member in strip:
if member in d:
del d[member]
if 'token_expiry' in d and isinstance(d['token_expiry'], datetime.datetime):
d['token_expiry'] = d['token_expiry'].strftime(EXPIRY_FORMAT)
    # Add in information we will need later to reconstitute this instance.
d['_class'] = t.__name__
d['_module'] = t.__module__
return simplejson.dumps(d)
def to_json(self):
"""Creating a JSON representation of an instance of Credentials.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
@classmethod
def new_from_json(cls, s):
"""Utility class method to instantiate a Credentials subclass from a JSON
representation produced by to_json().
Args:
s: string, JSON from to_json().
Returns:
An instance of the subclass of Credentials that was serialized with
to_json().
"""
data = simplejson.loads(s)
# Find and call the right classmethod from_json() to restore the object.
module = data['_module']
try:
m = __import__(module)
except ImportError:
# In case there's an object from the old package structure, update it
module = module.replace('.apiclient', '')
m = __import__(module)
m = __import__(module, fromlist=module.split('.')[:-1])
kls = getattr(m, data['_class'])
from_json = getattr(kls, 'from_json')
return from_json(s)
@classmethod
def from_json(cls, s):
"""Instantiate a Credentials object from a JSON description of it.
The JSON should have been produced by calling .to_json() on the object.
Args:
data: dict, A deserialized JSON object.
Returns:
An instance of a Credentials subclass.
"""
return Credentials()
class Flow(object):
"""Base class for all Flow objects."""
pass
class Storage(object):
"""Base class for all Storage objects.
Store and retrieve a single credential. This class supports locking
such that multiple processes and threads can operate on a single
store.
"""
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant.
"""
pass
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
pass
def locked_get(self):
"""Retrieve credential.
The Storage lock must be held when this is called.
Returns:
oauth2client.client.Credentials
"""
_abstract()
def locked_put(self, credentials):
"""Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
_abstract()
def locked_delete(self):
"""Delete a credential.
The Storage lock must be held when this is called.
"""
_abstract()
def get(self):
"""Retrieve credential.
The Storage lock must *not* be held when this is called.
Returns:
oauth2client.client.Credentials
"""
self.acquire_lock()
try:
return self.locked_get()
finally:
self.release_lock()
def put(self, credentials):
"""Write a credential.
    The Storage lock must *not* be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self.acquire_lock()
try:
self.locked_put(credentials)
finally:
self.release_lock()
def delete(self):
"""Delete credential.
Frees any resources associated with storing the credential.
The Storage lock must *not* be held when this is called.
Returns:
None
"""
self.acquire_lock()
try:
return self.locked_delete()
finally:
self.release_lock()
def clean_headers(headers):
"""Forces header keys and values to be strings, i.e not unicode.
  The httplib module just concatenates the header keys and values in a way that
  may make the message header a unicode string, which, if it is then
  concatenated with a binary request body, may result in a unicode decode error.
Args:
headers: dict, A dictionary of headers.
Returns:
The same dictionary but with all the keys converted to strings.
"""
clean = {}
try:
for k, v in headers.iteritems():
clean[str(k)] = str(v)
except UnicodeEncodeError:
raise NonAsciiHeaderError(k + ': ' + v)
return clean
def _update_query_params(uri, params):
"""Updates a URI with new query parameters.
Args:
uri: string, A valid URI, with potential existing query parameters.
params: dict, A dictionary of query parameters.
Returns:
The same URI but with the new query parameters added.
"""
parts = list(urlparse.urlparse(uri))
query_params = dict(parse_qsl(parts[4])) # 4 is the index of the query part
query_params.update(params)
parts[4] = urllib.urlencode(query_params)
return urlparse.urlunparse(parts)
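# Illustrative example (not part of the library): given
#   _update_query_params('http://example.com/path?foo=1', {'bar': '2'})
# the result is equivalent to 'http://example.com/path?foo=1&bar=2'
# (parameter order may differ, since the merged query dict is unordered).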
class OAuth2Credentials(Credentials):
"""Credentials object for OAuth 2.0.
Credentials can be applied to an httplib2.Http object using the authorize()
method, which then adds the OAuth 2.0 access token to each request.
OAuth2Credentials objects may be safely pickled and unpickled.
"""
@util.positional(8)
def __init__(self, access_token, client_id, client_secret, refresh_token,
token_expiry, token_uri, user_agent, revoke_uri=None,
id_token=None, token_response=None):
"""Create an instance of OAuth2Credentials.
    This constructor is not usually called by the user; instead,
OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.
Args:
access_token: string, access token.
client_id: string, client identifier.
client_secret: string, client secret.
refresh_token: string, refresh token.
token_expiry: datetime, when the access_token expires.
token_uri: string, URI of token endpoint.
user_agent: string, The HTTP User-Agent to provide for this application.
revoke_uri: string, URI for revoke endpoint. Defaults to None; a token
can't be revoked if this is None.
id_token: object, The identity of the resource owner.
token_response: dict, the decoded response to the token request. None
if a token hasn't been requested yet. Stored because some providers
(e.g. wordpress.com) include extra fields that clients may want.
Notes:
store: callable, A callable that when passed a Credential
will store the credential back to where it came from.
This is needed to store the latest access_token if it
has expired and been refreshed.
"""
self.access_token = access_token
self.client_id = client_id
self.client_secret = client_secret
self.refresh_token = refresh_token
self.store = None
self.token_expiry = token_expiry
self.token_uri = token_uri
self.user_agent = user_agent
self.revoke_uri = revoke_uri
self.id_token = id_token
self.token_response = token_response
# True if the credentials have been revoked or expired and can't be
# refreshed.
self.invalid = False
def authorize(self, http):
"""Authorize an httplib2.Http instance with these credentials.
The modified http.request method will add authentication headers to each
request and will refresh access_tokens when a 401 is received on a
request. In addition the http.request method has a credentials property,
http.request.credentials, which is the Credentials object that authorized
it.
Args:
http: An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = credentials.authorize(h)
    You can't create a new OAuth subclass of httplib2.Authentication
because it never gets passed the absolute URI, which is needed for
signing. So instead we have to overload 'request' with a closure
that adds in the Authorization header and then calls the original
version of 'request()'.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
@util.positional(1)
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
if not self.access_token:
logger.info('Attempting refresh to obtain initial access_token')
self._refresh(request_orig)
# Modify the request headers to add the appropriate
# Authorization header.
if headers is None:
headers = {}
self.apply(headers)
if self.user_agent is not None:
if 'user-agent' in headers:
headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = self.user_agent
resp, content = request_orig(uri, method, body, clean_headers(headers),
redirections, connection_type)
if resp.status in REFRESH_STATUS_CODES:
logger.info('Refreshing due to a %s' % str(resp.status))
self._refresh(request_orig)
self.apply(headers)
return request_orig(uri, method, body, clean_headers(headers),
redirections, connection_type)
else:
return (resp, content)
# Replace the request method with our own closure.
http.request = new_request
# Set credentials as a property of the request method.
setattr(http.request, 'credentials', self)
return http
def refresh(self, http):
"""Forces a refresh of the access_token.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
"""
self._refresh(http.request)
def revoke(self, http):
"""Revokes a refresh_token and makes the credentials void.
Args:
http: httplib2.Http, an http object to be used to make the revoke
request.
"""
self._revoke(http.request)
def apply(self, headers):
"""Add the authorization to the headers.
Args:
headers: dict, the headers to add the Authorization header to.
"""
headers['Authorization'] = 'Bearer ' + self.access_token
def to_json(self):
return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
@classmethod
def from_json(cls, s):
"""Instantiate a Credentials object from a JSON description of it. The JSON
should have been produced by calling .to_json() on the object.
Args:
data: dict, A deserialized JSON object.
Returns:
An instance of a Credentials subclass.
"""
data = simplejson.loads(s)
if 'token_expiry' in data and not isinstance(data['token_expiry'],
datetime.datetime):
try:
data['token_expiry'] = datetime.datetime.strptime(
data['token_expiry'], EXPIRY_FORMAT)
except:
data['token_expiry'] = None
retval = cls(
data['access_token'],
data['client_id'],
data['client_secret'],
data['refresh_token'],
data['token_expiry'],
data['token_uri'],
data['user_agent'],
revoke_uri=data.get('revoke_uri', None),
id_token=data.get('id_token', None),
token_response=data.get('token_response', None))
retval.invalid = data['invalid']
return retval
@property
def access_token_expired(self):
"""True if the credential is expired or invalid.
If the token_expiry isn't set, we assume the token doesn't expire.
"""
if self.invalid:
return True
if not self.token_expiry:
return False
now = datetime.datetime.utcnow()
if now >= self.token_expiry:
logger.info('access_token is expired. Now: %s, token_expiry: %s',
now, self.token_expiry)
return True
return False
def set_store(self, store):
"""Set the Storage for the credential.
Args:
      store: Storage, an implementation of the Storage object.
This is needed to store the latest access_token if it
has expired and been refreshed. This implementation uses
locking to check for updates before updating the
access_token.
"""
self.store = store
def _updateFromCredential(self, other):
"""Update this Credential from another instance."""
self.__dict__.update(other.__getstate__())
def __getstate__(self):
"""Trim the state down to something that can be pickled."""
d = copy.copy(self.__dict__)
del d['store']
return d
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled."""
self.__dict__.update(state)
self.store = None
def _generate_refresh_request_body(self):
"""Generate the body that will be used in the refresh request."""
body = urllib.urlencode({
'grant_type': 'refresh_token',
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': self.refresh_token,
})
return body
def _generate_refresh_request_headers(self):
"""Generate the headers that will be used in the refresh request."""
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
return headers
def _refresh(self, http_request):
"""Refreshes the access_token.
This method first checks by reading the Storage object if available.
If a refresh is still needed, it holds the Storage lock until the
refresh is completed.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
if not self.store:
self._do_refresh_request(http_request)
else:
self.store.acquire_lock()
try:
new_cred = self.store.locked_get()
if (new_cred and not new_cred.invalid and
new_cred.access_token != self.access_token):
logger.info('Updated access_token read from Storage')
self._updateFromCredential(new_cred)
else:
self._do_refresh_request(http_request)
finally:
self.store.release_lock()
def _do_refresh_request(self, http_request):
"""Refresh the access_token using the refresh_token.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
body = self._generate_refresh_request_body()
headers = self._generate_refresh_request_headers()
logger.info('Refreshing access_token')
resp, content = http_request(
self.token_uri, method='POST', body=body, headers=headers)
if resp.status == 200:
# TODO(jcgregorio) Raise an error if loads fails?
d = simplejson.loads(content)
self.token_response = d
self.access_token = d['access_token']
self.refresh_token = d.get('refresh_token', self.refresh_token)
if 'expires_in' in d:
self.token_expiry = datetime.timedelta(
seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
else:
self.token_expiry = None
if self.store:
self.store.locked_put(self)
else:
# An {'error':...} response body means the token is expired or revoked,
# so we flag the credentials as such.
logger.info('Failed to retrieve access token: %s' % content)
error_msg = 'Invalid response %s.' % resp['status']
try:
d = simplejson.loads(content)
if 'error' in d:
error_msg = d['error']
self.invalid = True
if self.store:
self.store.locked_put(self)
except StandardError:
pass
raise AccessTokenRefreshError(error_msg)
def _revoke(self, http_request):
"""Revokes the refresh_token and deletes the store if available.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the revoke request.
"""
self._do_revoke(http_request, self.refresh_token)
def _do_revoke(self, http_request, token):
"""Revokes the credentials and deletes the store if available.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
token: A string used as the token to be revoked. Can be either an
access_token or refresh_token.
Raises:
TokenRevokeError: If the revoke request does not return with a 200 OK.
"""
logger.info('Revoking token')
query_params = {'token': token}
token_revoke_uri = _update_query_params(self.revoke_uri, query_params)
resp, content = http_request(token_revoke_uri)
if resp.status == 200:
self.invalid = True
else:
error_msg = 'Invalid response %s.' % resp.status
try:
d = simplejson.loads(content)
if 'error' in d:
error_msg = d['error']
except StandardError:
pass
raise TokenRevokeError(error_msg)
if self.store:
self.store.delete()
class AccessTokenCredentials(OAuth2Credentials):
"""Credentials object for OAuth 2.0.
Credentials can be applied to an httplib2.Http object using the
authorize() method, which then signs each request from that object
with the OAuth 2.0 access token. This set of credentials is for the
use case where you have acquired an OAuth 2.0 access_token from
another place such as a JavaScript client or another web
application, and wish to use it from Python. Because only the
access_token is present it can not be refreshed and will in time
expire.
AccessTokenCredentials objects may be safely pickled and unpickled.
Usage:
credentials = AccessTokenCredentials('<an access token>',
'my-user-agent/1.0')
http = httplib2.Http()
http = credentials.authorize(http)
Exceptions:
AccessTokenCredentialsExpired: raised when the access_token expires or is
revoked.
"""
def __init__(self, access_token, user_agent, revoke_uri=None):
"""Create an instance of OAuth2Credentials
This is one of the few types if Credentials that you should contrust,
Credentials objects are usually instantiated by a Flow.
Args:
access_token: string, access token.
user_agent: string, The HTTP User-Agent to provide for this application.
revoke_uri: string, URI for revoke endpoint. Defaults to None; a token
can't be revoked if this is None.
"""
super(AccessTokenCredentials, self).__init__(
access_token,
None,
None,
None,
None,
None,
user_agent,
revoke_uri=revoke_uri)
@classmethod
def from_json(cls, s):
data = simplejson.loads(s)
retval = AccessTokenCredentials(
data['access_token'],
data['user_agent'])
return retval
def _refresh(self, http_request):
raise AccessTokenCredentialsError(
'The access_token is expired or invalid and can\'t be refreshed.')
def _revoke(self, http_request):
"""Revokes the access_token and deletes the store if available.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the revoke request.
"""
self._do_revoke(http_request, self.access_token)
class AssertionCredentials(OAuth2Credentials):
"""Abstract Credentials object used for OAuth 2.0 assertion grants.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens. It must
be subclassed to generate the appropriate assertion string.
AssertionCredentials objects may be safely pickled and unpickled.
"""
@util.positional(2)
def __init__(self, assertion_type, user_agent=None,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
**unused_kwargs):
"""Constructor for AssertionFlowCredentials.
Args:
assertion_type: string, assertion type that will be declared to the auth
server
user_agent: string, The HTTP User-Agent to provide for this application.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint.
"""
super(AssertionCredentials, self).__init__(
None,
None,
None,
None,
None,
token_uri,
user_agent,
revoke_uri=revoke_uri)
self.assertion_type = assertion_type
def _generate_refresh_request_body(self):
assertion = self._generate_assertion()
body = urllib.urlencode({
'assertion': assertion,
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
})
return body
def _generate_assertion(self):
"""Generate the assertion string that will be used in the access token
request.
"""
_abstract()
def _revoke(self, http_request):
"""Revokes the access_token and deletes the store if available.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the revoke request.
"""
self._do_revoke(http_request, self.access_token)
if HAS_CRYPTO:
# PyOpenSSL and PyCrypto are not prerequisites for oauth2client, so if it is
# missing then don't create the SignedJwtAssertionCredentials or the
# verify_id_token() method.
class SignedJwtAssertionCredentials(AssertionCredentials):
"""Credentials object used for OAuth 2.0 Signed JWT assertion grants.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
SignedJwtAssertionCredentials requires either PyOpenSSL, or PyCrypto 2.6 or
later. For App Engine you may also consider using AppAssertionCredentials.
"""
MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
@util.positional(4)
def __init__(self,
service_account_name,
private_key,
scope,
private_key_password='notasecret',
user_agent=None,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
**kwargs):
"""Constructor for SignedJwtAssertionCredentials.
Args:
service_account_name: string, id for account, usually an email address.
private_key: string, private key in PKCS12 or PEM format.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
private_key_password: string, password for private_key, unused if
private_key is in PEM format.
user_agent: string, HTTP User-Agent to provide for this application.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint.
kwargs: kwargs, Additional parameters to add to the JWT token, for
example [email protected]."""
super(SignedJwtAssertionCredentials, self).__init__(
None,
user_agent=user_agent,
token_uri=token_uri,
revoke_uri=revoke_uri,
)
self.scope = util.scopes_to_string(scope)
# Keep base64 encoded so it can be stored in JSON.
self.private_key = base64.b64encode(private_key)
self.private_key_password = private_key_password
self.service_account_name = service_account_name
self.kwargs = kwargs
@classmethod
def from_json(cls, s):
data = simplejson.loads(s)
retval = SignedJwtAssertionCredentials(
data['service_account_name'],
base64.b64decode(data['private_key']),
data['scope'],
private_key_password=data['private_key_password'],
user_agent=data['user_agent'],
token_uri=data['token_uri'],
**data['kwargs']
)
retval.invalid = data['invalid']
retval.access_token = data['access_token']
return retval
def _generate_assertion(self):
"""Generate the assertion that will be used in the request."""
now = long(time.time())
payload = {
'aud': self.token_uri,
'scope': self.scope,
'iat': now,
'exp': now + SignedJwtAssertionCredentials.MAX_TOKEN_LIFETIME_SECS,
'iss': self.service_account_name
}
payload.update(self.kwargs)
logger.debug(str(payload))
private_key = base64.b64decode(self.private_key)
return crypt.make_signed_jwt(crypt.Signer.from_string(
private_key, self.private_key_password), payload)
# Only used in verify_id_token(), which is always calling to the same URI
# for the certs.
_cached_http = httplib2.Http(MemoryCache())
@util.positional(2)
def verify_id_token(id_token, audience, http=None,
cert_uri=ID_TOKEN_VERIFICATON_CERTS):
"""Verifies a signed JWT id_token.
This function requires PyOpenSSL and because of that it does not work on
App Engine.
Args:
id_token: string, A Signed JWT.
audience: string, The audience 'aud' that the token should be for.
http: httplib2.Http, instance to use to make the HTTP request. Callers
should supply an instance that has caching enabled.
cert_uri: string, URI of the certificates in JSON format to
verify the JWT against.
Returns:
The deserialized JSON in the JWT.
Raises:
oauth2client.crypt.AppIdentityError if the JWT fails to verify.
"""
if http is None:
http = _cached_http
resp, content = http.request(cert_uri)
if resp.status == 200:
certs = simplejson.loads(content)
return crypt.verify_signed_jwt_with_certs(id_token, certs, audience)
else:
raise VerifyJwtTokenError('Status code: %d' % resp.status)
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = b64string.encode('ascii')
padded = b64string + '=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
def _extract_id_token(id_token):
"""Extract the JSON payload from a JWT.
Does the extraction w/o checking the signature.
Args:
id_token: string, OAuth 2.0 id_token.
Returns:
object, The deserialized JSON payload.
"""
segments = id_token.split('.')
if (len(segments) != 3):
raise VerifyJwtTokenError(
'Wrong number of segments in token: %s' % id_token)
return simplejson.loads(_urlsafe_b64decode(segments[1]))
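# Illustrative sketch (not from the source): _extract_id_token only decodes the
# middle (payload) segment and never checks the signature, so a locally built
# token with bogus header/signature segments still parses:
#
#   payload = base64.urlsafe_b64encode('{"aud": "my-client-id"}').rstrip('=')
#   token = 'header.' + payload + '.signature'
#   _extract_id_token(token)['aud']   # -> 'my-client-id'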
def _parse_exchange_token_response(content):
"""Parses response of an exchange token request.
Most providers return JSON but some (e.g. Facebook) return a
url-encoded string.
Args:
content: The body of a response
Returns:
Content as a dictionary object. Note that the dict could be empty,
i.e. {}. That basically indicates a failure.
"""
resp = {}
try:
resp = simplejson.loads(content)
except StandardError:
# different JSON libs raise different exceptions,
# so we just do a catch-all here
resp = dict(parse_qsl(content))
# some providers respond with 'expires', others with 'expires_in'
if resp and 'expires' in resp:
resp['expires_in'] = resp.pop('expires')
return resp
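# Illustration (not part of the library): a url-encoded provider response such
# as 'access_token=abc&expires=3600' parses to
# {'access_token': 'abc', 'expires_in': '3600'}, while a JSON body is decoded
# by simplejson directly.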
@util.positional(4)
def credentials_from_code(client_id, client_secret, scope, code,
redirect_uri='postmessage', http=None,
user_agent=None, token_uri=GOOGLE_TOKEN_URI,
auth_uri=GOOGLE_AUTH_URI,
revoke_uri=GOOGLE_REVOKE_URI):
"""Exchanges an authorization code for an OAuth2Credentials object.
Args:
client_id: string, client identifier.
client_secret: string, client secret.
scope: string or iterable of strings, scope(s) to request.
  code: string, An authorization code, most likely passed down from
the client
redirect_uri: string, this is generally set to 'postmessage' to match the
redirect_uri that the client specified
http: httplib2.Http, optional http instance to use to do the fetch
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
Returns:
An OAuth2Credentials object.
Raises:
FlowExchangeError if the authorization code cannot be exchanged for an
access token
"""
flow = OAuth2WebServerFlow(client_id, client_secret, scope,
redirect_uri=redirect_uri, user_agent=user_agent,
auth_uri=auth_uri, token_uri=token_uri,
revoke_uri=revoke_uri)
credentials = flow.step2_exchange(code, http=http)
return credentials
@util.positional(3)
def credentials_from_clientsecrets_and_code(filename, scope, code,
message = None,
redirect_uri='postmessage',
http=None,
cache=None):
"""Returns OAuth2Credentials from a clientsecrets file and an auth code.
Will create the right kind of Flow based on the contents of the clientsecrets
file or will raise InvalidClientSecretsError for unknown types of Flows.
Args:
filename: string, File name of clientsecrets.
scope: string or iterable of strings, scope(s) to request.
code: string, An authorization code, most likely passed down from
the client
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. If message is provided then
    sys.exit will be called in the case of an error. If message is not
provided then clientsecrets.InvalidClientSecretsError will be raised.
redirect_uri: string, this is generally set to 'postmessage' to match the
redirect_uri that the client specified
http: httplib2.Http, optional http instance to use to do the fetch
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns:
An OAuth2Credentials object.
Raises:
FlowExchangeError if the authorization code cannot be exchanged for an
access token
UnknownClientSecretsFlowError if the file describes an unknown kind of Flow.
clientsecrets.InvalidClientSecretsError if the clientsecrets file is
invalid.
"""
flow = flow_from_clientsecrets(filename, scope, message=message, cache=cache,
redirect_uri=redirect_uri)
credentials = flow.step2_exchange(code, http=http)
return credentials
class OAuth2WebServerFlow(Flow):
"""Does the Web Server Flow for OAuth 2.0.
OAuth2WebServerFlow objects may be safely pickled and unpickled.
"""
@util.positional(4)
def __init__(self, client_id, client_secret, scope,
redirect_uri=None,
user_agent=None,
auth_uri=GOOGLE_AUTH_URI,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
**kwargs):
"""Constructor for OAuth2WebServerFlow.
The kwargs argument is used to set extra query parameters on the
auth_uri. For example, the access_type and approval_prompt
query parameters can be set via kwargs.
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
a non-web-based application, or a URI that handles the callback from
the authorization server.
user_agent: string, HTTP User-Agent to provide for this application.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
**kwargs: dict, The keyword arguments are all optional and required
parameters for the OAuth calls.
"""
self.client_id = client_id
self.client_secret = client_secret
self.scope = util.scopes_to_string(scope)
self.redirect_uri = redirect_uri
self.user_agent = user_agent
self.auth_uri = auth_uri
self.token_uri = token_uri
self.revoke_uri = revoke_uri
self.params = {
'access_type': 'offline',
'response_type': 'code',
}
self.params.update(kwargs)
@util.positional(1)
def step1_get_authorize_url(self, redirect_uri=None):
"""Returns a URI to redirect to the provider.
Args:
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
a non-web-based application, or a URI that handles the callback from
the authorization server. This parameter is deprecated, please move to
passing the redirect_uri in via the constructor.
Returns:
A URI as a string to redirect the user to begin the authorization flow.
"""
if redirect_uri is not None:
      logger.warning(('The redirect_uri parameter for '
          'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. Please '
          'move to passing the redirect_uri in via the constructor.'))
self.redirect_uri = redirect_uri
if self.redirect_uri is None:
raise ValueError('The value of redirect_uri must not be None.')
query_params = {
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'scope': self.scope,
}
query_params.update(self.params)
return _update_query_params(self.auth_uri, query_params)
@util.positional(2)
def step2_exchange(self, code, http=None):
"""Exhanges a code for OAuth2Credentials.
Args:
code: string or dict, either the code as a string, or a dictionary
of the query parameters to the redirect_uri, which contains
the code.
http: httplib2.Http, optional http instance to use to do the fetch
Returns:
An OAuth2Credentials object that can be used to authorize requests.
Raises:
      FlowExchangeError if a problem occurred exchanging the code for a
refresh_token.
"""
if not (isinstance(code, str) or isinstance(code, unicode)):
if 'code' not in code:
if 'error' in code:
error_msg = code['error']
else:
error_msg = 'No code was supplied in the query parameters.'
raise FlowExchangeError(error_msg)
else:
code = code['code']
body = urllib.urlencode({
'grant_type': 'authorization_code',
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'redirect_uri': self.redirect_uri,
'scope': self.scope,
})
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
if http is None:
http = httplib2.Http()
resp, content = http.request(self.token_uri, method='POST', body=body,
headers=headers)
d = _parse_exchange_token_response(content)
if resp.status == 200 and 'access_token' in d:
access_token = d['access_token']
refresh_token = d.get('refresh_token', None)
token_expiry = None
if 'expires_in' in d:
token_expiry = datetime.datetime.utcnow() + datetime.timedelta(
seconds=int(d['expires_in']))
if 'id_token' in d:
d['id_token'] = _extract_id_token(d['id_token'])
logger.info('Successfully retrieved access token')
return OAuth2Credentials(access_token, self.client_id,
self.client_secret, refresh_token, token_expiry,
self.token_uri, self.user_agent,
revoke_uri=self.revoke_uri,
id_token=d.get('id_token', None),
token_response=d)
else:
logger.info('Failed to retrieve access token: %s' % content)
if 'error' in d:
# you never know what those providers got to say
error_msg = unicode(d['error'])
else:
error_msg = 'Invalid response: %s.' % str(resp.status)
raise FlowExchangeError(error_msg)
@util.positional(2)
def flow_from_clientsecrets(filename, scope, redirect_uri=None,
message=None, cache=None):
"""Create a Flow from a clientsecrets file.
Will create the right kind of Flow based on the contents of the clientsecrets
file or will raise InvalidClientSecretsError for unknown types of Flows.
Args:
filename: string, File name of client secrets.
scope: string or iterable of strings, scope(s) to request.
redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
a non-web-based application, or a URI that handles the callback from
the authorization server.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. If message is provided then
    sys.exit will be called in the case of an error. If message is not
provided then clientsecrets.InvalidClientSecretsError will be raised.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns:
A Flow object.
Raises:
UnknownClientSecretsFlowError if the file describes an unknown kind of Flow.
clientsecrets.InvalidClientSecretsError if the clientsecrets file is
invalid.
"""
try:
client_type, client_info = clientsecrets.loadfile(filename, cache=cache)
if client_type in (clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED):
constructor_kwargs = {
'redirect_uri': redirect_uri,
'auth_uri': client_info['auth_uri'],
'token_uri': client_info['token_uri'],
}
revoke_uri = client_info.get('revoke_uri')
if revoke_uri is not None:
constructor_kwargs['revoke_uri'] = revoke_uri
return OAuth2WebServerFlow(
client_info['client_id'], client_info['client_secret'],
scope, **constructor_kwargs)
except clientsecrets.InvalidClientSecretsError:
if message:
sys.exit(message)
else:
raise
else:
raise UnknownClientSecretsFlowError(
'This OAuth 2.0 flow is unsupported: %r' % client_type)
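# --- Illustrative end-to-end sketch (not part of the library). ---
# A hypothetical installed application driving the web-server flow directly;
# the client_id/client_secret/scope values are placeholders:
#
#   flow = OAuth2WebServerFlow(client_id='CLIENT_ID', client_secret='SECRET',
#                              scope='https://www.googleapis.com/auth/userinfo.email',
#                              redirect_uri=OOB_CALLBACK_URN)
#   authorize_url = flow.step1_get_authorize_url()
#   # Send the user to authorize_url and collect the code they are handed back.
#   credentials = flow.step2_exchange(code)
#   http = credentials.authorize(httplib2.Http())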
| apache-2.0 | -282,568,818,409,263,330 | 31.464809 | 80 | 0.663633 | false |
ptrendx/mxnet | tests/nightly/test_kvstore.py | 12 | 10372 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
sys.path.insert(0, "../../python/")
import mxnet as mx
import numpy as np
import numpy.random as rnd
import copy
from mxnet.test_utils import assert_almost_equal
def check_diff_to_scalar(A, x, rank=None):
""" assert A == x"""
assert(np.sum(np.abs((A - x).asnumpy())) == 0), (rank, A.asnumpy(), x)
def compute_expected_2bit_quantization(arr, curr_residual, threshold):
from struct import pack,unpack
def bits2int(bits):
bits = [int(x) for x in bits[::-1]]
x = 0
for i in range(len(bits)):
x += bits[i]*2**i
return x
def as_float32(s):
return unpack("f",pack("I", bits2int(s)))[0]
# str_quant stores the quantized representation as a sequence of bits
str_quant = ''
new_residual = []
decompr = []
arr_npy = arr.asnumpy()
for i, a in np.ndenumerate(arr_npy):
a += curr_residual[i]
if a >= threshold:
str_quant += '11'
new_residual.append(a - threshold)
decompr.append(threshold)
elif a <= (-1*threshold):
str_quant += '10'
new_residual.append(a + threshold)
decompr.append(-1*threshold)
else:
str_quant += '00'
new_residual.append(a)
decompr.append(0)
    # pad the bit string when its length is not a multiple of 16
if len(str_quant)%16 != 0:
str_quant += '0'*(16 - len(str_quant)%16)
compr = []
    # convert the generated bit string into float32 values, 32 bits at a time
i = 0
while i<len(str_quant):
cur_float = str_quant[i+24:i+32] + str_quant[i+16:i+24] + str_quant[i+8:i+16] + str_quant[i:i+8]
compr.append(as_float32(cur_float))
i+=32
return np.array(compr), np.array(new_residual).reshape(arr.shape), np.array(decompr).reshape(arr.shape)
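# Worked illustration (not part of the test itself): with threshold=0.5 and a
# zero residual, the gradient row [0.6, -0.7, 0.1] quantizes as
#    0.6 >=  0.5   -> bits '11', dequantized  0.5, new residual  0.1
#   -0.7 <= -0.5   -> bits '10', dequantized -0.5, new residual -0.2
#    0.1 otherwise -> bits '00', dequantized  0.0, new residual  0.1
# so the server-side decompressed gradient is [0.5, -0.5, 0.0].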
## individual key interface
def test_kvstore(kv_type, stype):
print(kv_type)
kv = mx.kv.create(kv_type)
kv.set_optimizer(mx.optimizer.create('test', rescale_grad=lr))
for k, s in zip(keys, shapes):
kv.init(k, mx.nd.zeros(s))
res = [np.zeros(s) for s in shapes]
for i in range(nrepeat):
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.array(
data[i][j][g], mx.gpu(g)).tostype(stype) for g in range(nworker)])
res = [a + b * lr for a, b in zip(res, [sum(d) for d in data[i]])]
for j in range(len(keys)):
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j], out=out)
err = [np.sum(np.abs(o.asnumpy() - res[j])) for o in out]
err = sum(err) / np.sum(np.abs(res[j]))
assert(err < 1e-6), (err, shapes[j])
def test_compress_kvstore(kv_type, compression='2bit', threshold=0.5):
print(kv_type + ' with ' + compression + ' compression')
rate = 2
kv = mx.kv.create(kv_type)
kv.set_gradient_compression({'type':compression, 'threshold':threshold})
kv.set_optimizer(mx.optimizer.create('test', rescale_grad=rate))
for k, s in zip(keys, shapes):
kv.init(k, mx.nd.zeros(s))
# init one key with 1s so we can check if it was compressed during init
kv.init(gc_init_test_key, mx.nd.ones(shapes[0]))
# use different keys for random tests so that
# we can track residual from start
random_keys = [13, 15, 17]
for k, s in zip(random_keys, shapes):
kv.init(k, mx.nd.zeros(s))
def pull_init_test(kv):
# checks that compression is not applied to init of key
out = [mx.nd.zeros(shapes[0], mx.gpu(g)) for g in range(nworker)]
kv.pull(gc_init_test_key, out=out)
exp = np.ones_like(out[0].asnumpy())
for o in out:
assert_almost_equal(o.asnumpy(), exp)
def pull_before_push(kv):
for i in range(nrepeat):
for j in range(len(keys)):
out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j], out=out)
exp = np.zeros_like(out[0].asnumpy())
for o in out:
assert_almost_equal(o.asnumpy(), exp)
def push_zeros(kv):
for i in range(nrepeat):
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)])
out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j], out=out)
exp = np.zeros_like(out[0].asnumpy())
for o in out:
assert_almost_equal(o.asnumpy(), exp)
def verify_residual(kv, threshold, rate):
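        # values below the threshold accumulate as residual and nothing is
        # sent; once the running residual crosses the threshold a single
        # quantized update of size threshold*rate*nworker shows up on pull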
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*0.4 for g in range(nworker)])
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j],out=out)
for o in out:
check_diff_to_scalar(o, 0)
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*(threshold-0.3) for g in range(nworker)])
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j],out=out)
curval = threshold * rate * nworker
for o in out:
check_diff_to_scalar(o, curval)
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*(0.2) for g in range(nworker)])
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j],out=out)
for o in out:
check_diff_to_scalar(o, curval)
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*(threshold-0.3) for g in range(nworker)])
out = [mx.nd.zeros(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j],out=out)
curval += threshold*rate*nworker
for o in out:
check_diff_to_scalar(o, curval)
# residual would be 0 now
return curval
def check_neg(kv, neg, rate, curval):
for r in range(nrepeat):
curval = curval + rate*nworker*neg
for j in range(len(keys)):
kv.push(keys[j], [mx.nd.ones(shapes[j], mx.gpu(g))*neg for g in range(nworker)])
out = [mx.nd.ones(shapes[j], mx.gpu(g)) for g in range(nworker)]
kv.pull(keys[j], out=out)
for o in out:
check_diff_to_scalar(o, curval)
# residual would be 0 again
def check_compr_random(kv, threshold):
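        # push uniformly random gradients and check that the pulled update
        # matches the CPU simulation of 2-bit quantization summed over workers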
for k, s in zip(random_keys, shapes):
curr_residual = [np.zeros(s) for g in range(nworker)]
orig_val = [mx.nd.zeros(s, mx.gpu(g)) for g in range(nworker)]
kv.pull(k, out=orig_val)
grads = [mx.nd.random_uniform(-0.6, 0.6, shape=s, ctx=mx.gpu(g)) for g in range(nworker)]
grads_cpy = copy.deepcopy(grads)
kv.push(k, grads)
val = [mx.nd.zeros(s, mx.gpu(g)) for g in range(nworker)]
kv.pull(k, out=val)
diffs = [val[g] - orig_val[g] for g in range(nworker)]
# compute expected by using simulation of operator
# on cpu
sum_dequantized_vals = np.zeros(s)
for g in range(nworker):
compr, curr_residual[g], decompr = compute_expected_2bit_quantization(
grads_cpy[g], curr_residual[g], threshold)
sum_dequantized_vals += (decompr * rate)
for g in range(nworker):
assert_almost_equal(diffs[g].asnumpy(), sum_dequantized_vals)
pull_init_test(kv)
pull_before_push(kv)
push_zeros(kv)
curval = verify_residual(kv, threshold, rate)
check_neg(kv, -1*threshold, rate, curval)
check_compr_random(kv, threshold)
## group keys interface
def test_group_kvstore(kv_type, stype):
print(kv_type)
kv = mx.kv.create(kv_type)
kv.set_optimizer(mx.optimizer.create('test', rescale_grad=lr))
kv.init(keys, [mx.nd.zeros(s) for s in shapes])
res = [np.zeros(s) for s in shapes]
out = [[mx.nd.zeros(s, mx.gpu(g)) for g in range(nworker)] for s in shapes]
for i in range(nrepeat):
kv.push(keys, [[
mx.nd.array(data[i][j][g], mx.gpu(g)).tostype(stype) for g in range(nworker)]
for j in range(len(keys))])
kv.pull(keys, out=out)
res = [a + b * lr for a, b in zip(res, [sum(d) for d in data[i]])]
for a, b in zip(res, out):
err = [np.sum(np.abs(o.asnumpy() - a)) for o in b]
err = sum(err) / np.sum(np.abs(a))
assert(err < 1e-6), (err, a.shape)
if __name__ == "__main__":
keys = [3, 5, 7]
# let the last shape exceed MXNET_KVSTORE_BIGARRAY_BOUND
shapes = [(4, 4), (100, 100), (2000, 2000)]
stypes = ['default', 'row_sparse']
gc_init_test_key = 9
lr = .1
nworker = 4
nrepeat = 10
# generate data
data = [[[np.random.random(s)*2-1 for i in range(nworker)] for s in shapes] for j in range(nrepeat)]
for stype in stypes:
test_kvstore('local_update_cpu', stype)
test_kvstore('local_allreduce_cpu', stype)
test_kvstore('local_allreduce_device', stype)
## compression for local kvstore happens only when reduce is on device
test_compress_kvstore('local_allreduce_device')
for stype in stypes:
test_group_kvstore('local_update_cpu', stype)
test_group_kvstore('local_allreduce_cpu', stype)
test_group_kvstore('local_allreduce_device', stype)
| apache-2.0 | -4,466,153,824,638,980,000 | 39.20155 | 107 | 0.575106 | false |
dannyperez/bolivarcoin | share/seeds/generate-seeds.py | 79 | 4297 | #!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
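# For example, the input line "1.2.3.4:8333" is emitted as
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8333}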
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
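    # accepts "<host>", "<host>:<port>" or "[<ipv6>]:<port>" and returns the
    # 16-byte address from name_to_ipv6 together with the port (defaultport
    # when none is given)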
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8333)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 18333)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| mit | 2,846,191,250,811,500,000 | 30.82963 | 98 | 0.575983 | false |
Panos512/inspire-next | inspirehep/modules/authors/__init__.py | 3 | 1142 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""INSPIRE authors."""
from __future__ import absolute_import, print_function
from .ext import INSPIREAuthors
from .receivers import *
__all__ = ('INSPIREAuthors', )
| gpl-2.0 | 3,912,756,386,225,137,000 | 33.606061 | 76 | 0.747811 | false |
alphagov/notifications-admin | tests/app/main/views/test_notifications.py | 1 | 29605 | import base64
from functools import partial
from unittest.mock import mock_open
import pytest
from flask import url_for
from freezegun import freeze_time
from notifications_python_client.errors import APIError
from PyPDF2.utils import PdfReadError
from tests.conftest import (
SERVICE_ONE_ID,
create_active_caseworking_user,
create_active_user_with_permissions,
create_notification,
normalize_spaces,
)
@pytest.mark.parametrize('key_type, notification_status, expected_status', [
(None, 'created', 'Sending'),
(None, 'sending', 'Sending'),
(None, 'delivered', 'Delivered'),
(None, 'failed', 'Failed'),
(None, 'temporary-failure', 'Phone not accepting messages right now'),
(None, 'permanent-failure', 'Not delivered'),
(None, 'technical-failure', 'Technical failure'),
('team', 'delivered', 'Delivered'),
('live', 'delivered', 'Delivered'),
('test', 'sending', 'Sending (test)'),
('test', 'delivered', 'Delivered (test)'),
('test', 'permanent-failure', 'Not delivered (test)'),
])
@pytest.mark.parametrize('user', [
create_active_user_with_permissions(),
create_active_caseworking_user(),
])
@freeze_time("2016-01-01 11:09:00.061258")
def test_notification_status_page_shows_details(
client_request,
mocker,
mock_has_no_jobs,
service_one,
fake_uuid,
user,
key_type,
notification_status,
expected_status,
):
mocker.patch('app.user_api_client.get_user', return_value=user)
notification = create_notification(notification_status=notification_status, key_type=key_type)
_mock_get_notification = mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=service_one['id'],
notification_id=fake_uuid
)
assert normalize_spaces(page.select('.sms-message-recipient')[0].text) == (
'To: 07123456789'
)
assert normalize_spaces(page.select('.sms-message-wrapper')[0].text) == (
'service one: hello Jo'
)
assert normalize_spaces(page.select('.ajax-block-container p')[0].text) == (
expected_status
)
_mock_get_notification.assert_called_with(
service_one['id'],
fake_uuid
)
@pytest.mark.parametrize('notification_type, notification_status, expected_class', [
('sms', 'failed', 'error'),
('email', 'failed', 'error'),
('sms', 'sent', 'sent-international'),
('email', 'sent', None),
('sms', 'created', 'default'),
('email', 'created', 'default'),
])
@freeze_time("2016-01-01 11:09:00.061258")
def test_notification_status_page_formats_email_and_sms_status_correctly(
client_request,
mocker,
mock_has_no_jobs,
service_one,
fake_uuid,
active_user_with_permissions,
notification_type,
notification_status,
expected_class,
):
mocker.patch('app.user_api_client.get_user', return_value=active_user_with_permissions)
notification = create_notification(notification_status=notification_status, template_type=notification_type)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=service_one['id'],
notification_id=fake_uuid
)
assert page.select_one(f'.ajax-block-container p.notification-status.{expected_class}')
@pytest.mark.parametrize('template_redaction_setting, expected_content', [
(False, 'service one: hello Jo'),
(True, 'service one: hello hidden'),
])
@freeze_time("2016-01-01 11:09:00.061258")
def test_notification_status_page_respects_redaction(
client_request,
mocker,
service_one,
fake_uuid,
template_redaction_setting,
expected_content,
):
_mock_get_notification = mocker.patch(
'app.notification_api_client.get_notification',
return_value=create_notification(redact_personalisation=template_redaction_setting)
)
page = client_request.get(
'main.view_notification',
service_id=service_one['id'],
notification_id=fake_uuid
)
assert normalize_spaces(page.select('.sms-message-wrapper')[0].text) == expected_content
_mock_get_notification.assert_called_with(
service_one['id'],
fake_uuid,
)
@pytest.mark.parametrize('extra_args, expected_back_link', [
(
{},
partial(url_for, 'main.view_notifications', message_type='sms', status='sending,delivered,failed'),
),
(
{'from_job': 'job_id'},
partial(url_for, 'main.view_job', job_id='job_id'),
),
(
{'from_uploaded_letters': '2020-02-02'},
partial(url_for, 'main.uploaded_letters', letter_print_day='2020-02-02'),
),
(
{'help': '0'},
None,
),
(
{'help': '1'},
None,
),
(
{'help': '2'},
None,
),
])
def test_notification_status_shows_expected_back_link(
client_request,
mocker,
mock_get_notification,
fake_uuid,
extra_args,
expected_back_link,
):
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
**extra_args
)
back_link = page.select_one('.govuk-back-link')
if expected_back_link:
assert back_link['href'] == expected_back_link(service_id=SERVICE_ONE_ID)
else:
assert back_link is None
@pytest.mark.parametrize('time_of_viewing_page, expected_message', (
('2012-01-01 01:01', (
"‘sample template’ was sent by Test User today at 1:01am"
)),
('2012-01-02 01:01', (
"‘sample template’ was sent by Test User yesterday at 1:01am"
)),
('2012-01-03 01:01', (
"‘sample template’ was sent by Test User on 1 January at 1:01am"
)),
('2013-01-03 01:01', (
"‘sample template’ was sent by Test User on 1 January 2012 at 1:01am"
)),
))
def test_notification_page_doesnt_link_to_template_in_tour(
mocker,
client_request,
fake_uuid,
mock_get_notification,
time_of_viewing_page,
expected_message,
):
with freeze_time('2012-01-01 01:01'):
notification = create_notification()
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
with freeze_time(time_of_viewing_page):
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
help=3,
)
assert normalize_spaces(page.select('main p:nth-of-type(1)')[0].text) == (
expected_message
)
assert len(page.select('main p:nth-of-type(1) a')) == 0
@freeze_time("2016-01-01 01:01")
def test_notification_page_shows_page_for_letter_notification(
client_request,
mocker,
fake_uuid,
):
count_of_pages = 3
notification = create_notification(notification_status='created', template_type='letter', postage='second')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mock_page_count = mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=count_of_pages
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p:nth-of-type(1)')[0].text) == (
"‘sample template’ was sent by Test User today at 1:01am"
)
assert normalize_spaces(page.select('main p:nth-of-type(2)')[0].text) == (
'Printing starts today at 5:30pm'
)
assert normalize_spaces(page.select('main p:nth-of-type(3)')[0].text) == (
'Estimated delivery date: Wednesday 6 January'
)
assert len(page.select('.letter-postage')) == 1
assert normalize_spaces(page.select_one('.letter-postage').text) == (
'Postage: second class'
)
assert page.select_one('.letter-postage')['class'] == [
'letter-postage', 'letter-postage-second'
]
assert page.select('p.notification-status') == []
letter_images = page.select('main img')
assert len(letter_images) == count_of_pages
for index in range(count_of_pages):
assert page.select('img')[index]['src'].endswith(
'.png?page={}'.format(index + 1)
)
assert len(mock_page_count.call_args_list) == 1
assert mock_page_count.call_args_list[0][0][0]['name'] == 'sample template'
assert mock_page_count.call_args_list[0][1]['values'] == {'name': 'Jo'}
@freeze_time("2020-01-01 00:00")
def test_notification_page_shows_uploaded_letter(
client_request,
mocker,
fake_uuid,
):
mocker.patch(
'app.main.views.notifications.get_letter_file_data',
return_value=(b'foo', {
'message': '',
'invalid_pages': '[]',
'page_count': '1'
})
)
mocker.patch(
'app.main.views.notifications.pdf_page_count',
return_value=1
)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1,
)
notification = create_notification(
notification_status='created',
template_type='letter',
is_precompiled_letter=True,
sent_one_off=True,
)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p:nth-of-type(1)')[0].text) == (
'Uploaded by Test User yesterday at midnight'
)
assert normalize_spaces(page.select('main p:nth-of-type(2)')[0].text) == (
'Printing starts today at 5:30pm'
)
@freeze_time("2016-01-01 01:01")
@pytest.mark.parametrize('is_precompiled_letter, expected_p1, expected_p2, expected_postage', (
(
True,
'Provided as PDF today at 1:01am',
'This letter passed our checks, but we will not print it because you used a test key.',
'Postage: second class'
),
(
False,
'‘sample template’ was sent today at 1:01am',
'We will not print this letter because you used a test key.',
'Postage: second class',
),
))
def test_notification_page_shows_page_for_letter_sent_with_test_key(
client_request,
mocker,
fake_uuid,
is_precompiled_letter,
expected_p1,
expected_p2,
expected_postage,
):
if is_precompiled_letter:
mocker.patch(
'app.main.views.notifications.get_letter_file_data',
return_value=(b'foo', {
'message': '',
'invalid_pages': '[]',
'page_count': '1'
})
)
mocker.patch(
'app.main.views.notifications.pdf_page_count',
return_value=1
)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1,
)
notification = create_notification(
notification_status='created',
template_type='letter',
is_precompiled_letter=is_precompiled_letter,
postage='second',
key_type='test',
sent_one_off=False,
)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p:nth-of-type(1)')[0].text) == (
expected_p1
)
assert normalize_spaces(page.select('main p:nth-of-type(2)')[0].text) == (
expected_p2
)
assert normalize_spaces(
page.select_one('.letter-postage').text
) == expected_postage
assert page.select('p.notification-status') == []
def test_notification_page_shows_validation_failed_precompiled_letter(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter',
notification_status='validation-failed',
is_precompiled_letter=True
)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
metadata = {"page_count": "1", "status": "validation-failed",
"invalid_pages": "[1]",
"message": "content-outside-printable-area"}
mocker.patch('app.main.views.notifications.get_letter_file_data',
return_value=("some letter content", metadata))
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1,
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
error_message = page.find('p', class_='notification-status-cancelled').text
assert normalize_spaces(error_message) == (
'Validation failed because content is outside the printable area on page 1.'
'Files must meet our letter specification.'
)
assert not page.select('p.notification-status')
assert page.select_one('main img')['src'].endswith('.png?page=1')
assert not page.select('.letter-postage')
@pytest.mark.parametrize('notification_status, expected_message', (
(
'permanent-failure',
'Permanent failure – The provider cannot print the letter. Your letter will not be dispatched.',
),
(
'cancelled',
'Cancelled 1 January at 1:02am',
),
(
'technical-failure',
'Technical failure – Notify will resend once the team have fixed the problem',
),
))
@freeze_time("2016-01-01 01:01")
def test_notification_page_shows_cancelled_or_failed_letter(
client_request,
mocker,
fake_uuid,
notification_status,
expected_message,
):
notification = create_notification(template_type='letter', notification_status=notification_status)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1,
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p')[0].text) == (
"‘sample template’ was sent by Test User today at 1:01am"
)
assert normalize_spaces(page.select('main p')[1].text) == (
expected_message
)
assert not page.select('p.notification-status')
assert page.select_one('main img')['src'].endswith('.png?page=1')
@pytest.mark.parametrize('notification_type', ['email', 'sms'])
@freeze_time('2016-01-01 15:00')
def test_notification_page_does_not_show_cancel_link_for_sms_or_email_notifications(
client_request,
mocker,
fake_uuid,
notification_type,
):
notification = create_notification(template_type=notification_type, notification_status='created')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert 'Cancel sending this letter' not in normalize_spaces(page.text)
@freeze_time('2016-01-01 15:00')
def test_notification_page_shows_cancel_link_for_letter_which_can_be_cancelled(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter', notification_status='created')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert 'Cancel sending this letter' in normalize_spaces(page.text)
@freeze_time('2016-01-01 15:00')
def test_notification_page_does_not_show_cancel_link_for_letter_which_cannot_be_cancelled(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert 'Cancel sending this letter' not in normalize_spaces(page.text)
@pytest.mark.parametrize('postage, expected_postage_text, expected_class_value, expected_delivery', (
(
'first',
'Postage: first class',
'letter-postage-first',
'Estimated delivery date: Tuesday 5 January',
),
(
'europe',
'Postage: international',
'letter-postage-international',
'Estimated delivery date: Friday 8 January',
),
(
'rest-of-world',
'Postage: international',
'letter-postage-international',
'Estimated delivery date: Monday 11 January',
),
))
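# these expected dates assume the frozen time below: 6pm on 1 January 2016,
# which is already past the 5:30pm print deadline for that day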
@freeze_time("2016-01-01 18:00")
def test_notification_page_shows_page_for_other_postage_classes(
client_request,
mocker,
fake_uuid,
postage,
expected_postage_text,
expected_class_value,
expected_delivery,
):
notification = create_notification(
notification_status='pending-virus-check',
template_type='letter',
postage=postage,
)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch('app.main.views.notifications.get_page_count_for_letter', return_value=3)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert normalize_spaces(page.select('main p:nth-of-type(2)')[0].text) == 'Printing starts tomorrow at 5:30pm'
assert normalize_spaces(page.select('main p:nth-of-type(3)')[0].text) == (
expected_delivery
)
assert normalize_spaces(page.select_one('.letter-postage').text) == (
expected_postage_text
)
assert page.select_one('.letter-postage')['class'] == [
'letter-postage', expected_class_value
]
@pytest.mark.parametrize('filetype', [
'pdf', 'png'
])
@pytest.mark.parametrize('user', [
create_active_user_with_permissions(),
create_active_caseworking_user(),
])
def test_should_show_image_of_letter_notification(
logged_in_client,
fake_uuid,
mocker,
filetype,
user,
):
mocker.patch('app.user_api_client.get_user', return_value=user)
notification = create_notification(template_type='letter')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.notification_api_client.get_notification_letter_preview',
return_value={
'content': base64.b64encode(b'foo').decode('utf-8')
}
)
response = logged_in_client.get(url_for(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype=filetype
))
assert response.status_code == 200
assert response.get_data(as_text=True) == 'foo'
def test_should_show_image_of_letter_notification_that_failed_validation(
logged_in_client,
fake_uuid,
mocker
):
notification = create_notification(template_type='letter', notification_status='validation-failed')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
metadata = {
'message': 'content-outside-printable-area',
'invalid_pages': '[1]',
'page_count': '1'
}
mocker.patch(
'app.main.views.notifications.notification_api_client.get_notification_letter_preview',
return_value={
'content': base64.b64encode(b'foo').decode('utf-8'),
'metadata': metadata
}
)
response = logged_in_client.get(url_for(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype='png',
with_metadata=True
))
assert response.status_code == 200
assert response.get_data(as_text=True) == 'foo', metadata
def test_should_show_preview_error_image_letter_notification_on_preview_error(
logged_in_client,
fake_uuid,
mocker,
):
notification = create_notification(template_type='letter')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.notification_api_client.get_notification_letter_preview',
side_effect=APIError
)
mocker.patch("builtins.open", mock_open(read_data=b"preview error image"))
response = logged_in_client.get(url_for(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype='png'
))
assert response.status_code == 200
assert response.get_data(as_text=True) == 'preview error image'
def test_notification_page_shows_error_message_if_precompiled_letter_cannot_be_opened(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(
notification_status='validation-failed',
template_type='letter',
is_precompiled_letter=True)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_letter_file_data',
side_effect=PdfReadError()
)
mocker.patch(
'app.main.views.notifications.pdf_page_count',
side_effect=PdfReadError()
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
error_message = page.find('p', class_='notification-status-cancelled').text
assert normalize_spaces(error_message) == \
"Validation failed – There’s a problem with your letter. Notify cannot read this PDF."
def test_should_404_for_unknown_extension(
client_request,
fake_uuid,
):
client_request.get(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype='docx',
_expected_status=404,
)
@pytest.mark.parametrize('service_permissions, template_type, link_expected', [
([], '', False),
(['inbound_sms'], 'email', False),
(['inbound_sms'], 'letter', False),
(['inbound_sms'], 'sms', True),
])
def test_notification_page_has_link_to_send_another_for_sms(
client_request,
mocker,
fake_uuid,
service_one,
service_permissions,
template_type,
link_expected,
):
service_one['permissions'] = service_permissions
notification = create_notification(template_type=template_type)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
last_paragraph = page.select('main p')[-1]
conversation_link = url_for(
'.conversation',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
_anchor='n{}'.format(fake_uuid),
)
if link_expected:
assert normalize_spaces(last_paragraph.text) == (
'See all text messages sent to this phone number'
)
assert last_paragraph.select_one('a')['href'] == conversation_link
else:
assert conversation_link not in str(page.select_one('main'))
@pytest.mark.parametrize('template_type, expected_link', [
('email', lambda notification_id: None),
('sms', lambda notification_id: None),
('letter', partial(
url_for,
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
filetype='pdf'
)),
])
def test_notification_page_has_link_to_download_letter(
client_request,
mocker,
fake_uuid,
service_one,
template_type,
expected_link,
):
notification = create_notification(template_type=template_type)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
try:
download_link = page.select_one('a[download]')['href']
except TypeError:
download_link = None
assert download_link == expected_link(notification_id=fake_uuid)
@pytest.mark.parametrize('is_precompiled_letter, has_template_link', [
(True, False),
(False, True),
])
def test_notification_page_has_expected_template_link_for_letter(
client_request,
mocker,
fake_uuid,
service_one,
is_precompiled_letter,
has_template_link
):
if is_precompiled_letter:
mocker.patch(
'app.main.views.notifications.get_letter_file_data',
side_effect=[(b'foo', {"message": "", "invalid_pages": "[]", "page_count": "1"}), b'foo']
)
mocker.patch(
'app.main.views.notifications.pdf_page_count',
return_value=1
)
notification = create_notification(template_type='letter', is_precompiled_letter=is_precompiled_letter)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
link = page.select_one('main > p:nth-of-type(1) > a')
if has_template_link:
assert link
else:
assert link is None
def test_should_show_image_of_precompiled_letter_notification(
logged_in_client,
fake_uuid,
mocker,
):
notification = create_notification(template_type='letter', is_precompiled_letter=True)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mock_pdf_page_count = mocker.patch(
'app.main.views.notifications.pdf_page_count',
return_value=1
)
mocker.patch(
'app.main.views.notifications.notification_api_client.get_notification_letter_preview',
return_value={
'content': base64.b64encode(b'foo').decode('utf-8')
}
)
response = logged_in_client.get(url_for(
'main.view_letter_notification_as_preview',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
filetype="png"
))
assert response.status_code == 200
assert response.get_data(as_text=True) == 'foo'
    mock_pdf_page_count.assert_called_once()
@freeze_time('2016-01-01 15:00')
def test_show_cancel_letter_confirmation(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter', notification_status='created')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
page = client_request.get(
'main.cancel_letter',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
flash_message = normalize_spaces(page.find('div', class_='banner-dangerous').text)
assert 'Are you sure you want to cancel sending this letter?' in flash_message
@freeze_time('2016-01-01 15:00')
def test_cancelling_a_letter_calls_the_api(
client_request,
mocker,
fake_uuid,
):
notification = create_notification(template_type='letter', notification_status='created')
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
mocker.patch(
'app.main.views.notifications.get_page_count_for_letter',
return_value=1
)
cancel_endpoint = mocker.patch(
'app.main.views.notifications.notification_api_client.update_notification_to_cancelled'
)
client_request.post(
'main.cancel_letter',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
_follow_redirects=True,
_expected_redirect=None,
)
assert cancel_endpoint.called
@pytest.mark.parametrize('notification_type', ['sms', 'email'])
def test_should_show_reply_to_from_notification(
mocker,
fake_uuid,
notification_type,
client_request,
):
notification = create_notification(reply_to_text='reply to info', template_type=notification_type)
mocker.patch('app.notification_api_client.get_notification', return_value=notification)
page = client_request.get(
'main.view_notification',
service_id=SERVICE_ONE_ID,
notification_id=fake_uuid,
)
assert 'reply to info' in page.text
| mit | 8,759,862,887,065,441,000 | 29.265097 | 116 | 0.642125 | false |
tensorflow/models | research/delf/delf/python/examples/extract_boxes.py | 1 | 7510 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts bounding boxes from a list of images, saving them to files.
The images must be in JPG format. The program checks if boxes already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from delf import box_io
from delf import utils
from delf import detector
cmd_args = None
# Extension/suffix of produced files.
_BOX_EXT = '.boxes'
_VIZ_SUFFIX = '_viz.jpg'
# Used for plotting boxes.
_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w']
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.io.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold):
"""Filter boxes based on detection scores.
Boxes with detection score >= score_threshold are returned.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
score_threshold: Float detection score threshold to use.
Returns:
selected_boxes: selected `boxes`.
selected_scores: selected `scores`.
selected_class_indices: selected `class_indices`.
"""
selected_boxes = []
selected_scores = []
selected_class_indices = []
for i, box in enumerate(boxes):
if scores[i] >= score_threshold:
selected_boxes.append(box)
selected_scores.append(scores[i])
selected_class_indices.append(class_indices[i])
return np.array(selected_boxes), np.array(selected_scores), np.array(
selected_class_indices)
def _PlotBoxesAndSaveImage(image, boxes, output_path):
"""Plot boxes on image and save to output path.
Args:
image: Numpy array containing image.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
output_path: String containing output path.
"""
height = image.shape[0]
width = image.shape[1]
fig, ax = plt.subplots(1)
ax.imshow(image)
for i, box in enumerate(boxes):
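    # box coordinates are normalized [top, left, bottom, right], so scale
    # them to pixel values before plotting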
scaled_box = [
box[0] * height, box[1] * width, box[2] * height, box[3] * width
]
rect = patches.Rectangle([scaled_box[1], scaled_box[0]],
scaled_box[3] - scaled_box[1],
scaled_box[2] - scaled_box[0],
linewidth=3,
edgecolor=_BOX_EDGE_COLORS[i %
len(_BOX_EDGE_COLORS)],
facecolor='none')
ax.add_patch(rect)
ax.axis('off')
plt.savefig(output_path, bbox_inches='tight')
plt.close(fig)
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images.
print('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
print(f'done! Found {num_images} images')
# Create output directories if necessary.
if not tf.io.gfile.exists(cmd_args.output_dir):
tf.io.gfile.makedirs(cmd_args.output_dir)
if cmd_args.output_viz_dir and not tf.io.gfile.exists(
cmd_args.output_viz_dir):
tf.io.gfile.makedirs(cmd_args.output_viz_dir)
detector_fn = detector.MakeDetector(cmd_args.detector_path)
start = time.time()
for i, image_path in enumerate(image_paths):
# Report progress once in a while.
if i == 0:
print('Starting to detect objects in images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print(f'Processing image {i} out of {num_images}, last '
f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds')
start = time.time()
    # If the boxes file already exists, skip its computation.
base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path))
out_boxes_filename = base_boxes_filename + _BOX_EXT
out_boxes_fullpath = os.path.join(cmd_args.output_dir, out_boxes_filename)
if tf.io.gfile.exists(out_boxes_fullpath):
print(f'Skipping {image_path}')
continue
im = np.expand_dims(np.array(utils.RgbLoader(image_paths[i])), 0)
# Extract and save boxes.
(boxes_out, scores_out, class_indices_out) = detector_fn(im)
(selected_boxes, selected_scores,
selected_class_indices) = _FilterBoxesByScore(boxes_out[0], scores_out[0],
class_indices_out[0],
cmd_args.detector_thresh)
box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores,
selected_class_indices)
if cmd_args.output_viz_dir:
out_viz_filename = base_boxes_filename + _VIZ_SUFFIX
out_viz_fullpath = os.path.join(cmd_args.output_viz_dir, out_viz_filename)
_PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--detector_path',
type=str,
default='/tmp/d2r_frcnn_20190411/',
help="""
Path to exported detector model.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=.0,
help="""
Detector threshold. Any box with confidence score lower than this is not
returned.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images to undergo object detection.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_boxes',
help="""
Directory where bounding boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_viz_dir',
type=str,
default='',
help="""
Optional. If set, a visualization of the detected boxes overlaid on the
image is produced, and saved to this directory. Each image is saved with
_viz.jpg suffix.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | -4,798,036,224,904,665,000 | 31.79476 | 80 | 0.641145 | false |
DG-i/openshift-ansible | roles/lib_openshift/src/ansible/oc_edit.py | 42 | 1774 | # pylint: skip-file
# flake8: noqa
def main():
'''
ansible oc module for editing objects
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kind=dict(required=True,
type='str',
choices=['dc', 'deploymentconfig',
'rc', 'replicationcontroller',
'svc', 'service',
'scc', 'securitycontextconstraints',
'ns', 'namespace', 'project', 'projects',
'is', 'imagestream',
'istag', 'imagestreamtag',
'bc', 'buildconfig',
'routes',
'node',
'secret',
'pv', 'persistentvolume']),
file_name=dict(default=None, type='str'),
file_format=dict(default='yaml', type='str'),
content=dict(default=None, required=True, type='dict'),
force=dict(default=False, type='bool'),
separator=dict(default='.', type='str'),
),
supports_check_mode=True,
)
rval = Edit.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
| apache-2.0 | -5,314,907,019,347,981,000 | 35.958333 | 87 | 0.456595 | false |
demon-ru/iml-crm | addons/hw_posbox_upgrade/controllers/main.py | 172 | 4161 | # -*- coding: utf-8 -*-
import logging
import os
import time
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import threading
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
upgrade_template = """
<!DOCTYPE HTML>
<html>
<head>
<title>Odoo's PosBox - Software Upgrade</title>
<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>
<script>
$(function(){
var upgrading = false;
$('#upgrade').click(function(){
console.log('click');
if(!upgrading){
upgrading = true;
$('#upgrade').text('Upgrading, Please Wait');
$.ajax({
url:'/hw_proxy/perform_upgrade/'
}).then(function(status){
$('#upgrade').html('Upgrade Successful<br \\>Click to Restart the PosBox');
$('#upgrade').off('click');
$('#upgrade').click(function(){
$.ajax({ url:'/hw_proxy/perform_restart' })
$('#upgrade').text('Restarting');
$('#upgrade').off('click');
setTimeout(function(){
window.location = '/'
},30*1000);
});
},function(){
$('#upgrade').text('Upgrade Failed');
});
}
});
});
</script>
<style>
body {
width: 480px;
margin: 60px auto;
font-family: sans-serif;
text-align: justify;
color: #6B6B6B;
}
.centering{
text-align: center;
}
#upgrade {
padding: 20px;
background: rgb(121, 197, 107);
color: white;
border-radius: 3px;
text-align: center;
margin: 30px;
text-decoration: none;
display: inline-block;
}
</style>
</head>
<body>
<h1>PosBox Software Upgrade</h1>
<p>
This tool will help you perform an upgrade of the PosBox's software.
However the preferred method to upgrade the posbox is to flash the sd-card with
the <a href='http://nightly.openerp.com/trunk/posbox/'>latest image</a>. The upgrade
        procedure is explained in the <a href='/hw_proxy/static/doc/manual.pdf'>PosBox manual</a>
</p>
<p>
To upgrade the posbox, click on the upgrade button. The upgrade will take a few minutes. <b>Do not reboot</b> the PosBox during the upgrade.
</p>
<div class='centering'>
<a href='#' id='upgrade'>Upgrade</a>
</div>
</body>
</html>
"""
class PosboxUpgrader(hw_proxy.Proxy):
def __init__(self):
super(PosboxUpgrader,self).__init__()
self.upgrading = threading.Lock()
self.last_upgrade = 0
@http.route('/hw_proxy/upgrade', type='http', auth='none', )
def upgrade(self):
return upgrade_template
@http.route('/hw_proxy/perform_upgrade', type='http', auth='none')
def perform_upgrade(self):
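        # serialize upgrades with a lock and skip the run if the last upgrade
        # finished less than 30 seconds ago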
self.upgrading.acquire()
if time.time() - self.last_upgrade < 30:
self.upgrading.release()
return 'UPTODATE'
else:
os.system('/bin/bash /home/pi/openerp/update.sh')
self.last_upgrade = time.time()
self.upgrading.release()
return 'SUCCESS'
@http.route('/hw_proxy/perform_restart', type='http', auth='none')
def perform_restart(self):
self.upgrading.acquire()
if time.time() - self.last_upgrade < 30:
self.upgrading.release()
return 'RESTARTED'
else:
os.system('/bin/bash /home/pi/openerp/restart.sh')
self.last_upgrade = time.time()
self.upgrading.release()
return 'SUCCESS'
| agpl-3.0 | -6,774,643,513,960,914,000 | 32.02381 | 148 | 0.503244 | false |
mindm/2017Challenges | challenge_6/python/slandau3/ranges.py | 5 | 1713 | #!/usr/bin/env python3
# @author Slandau3
def ranges(input: list) -> list:
# The patterns_found list keeps track of the definite ranges we have completed
patterns_found = []
# pattern_in_progress keeps track of a pattern we are tracing
pattern_in_progress = []
for i in input:
        if len(pattern_in_progress) == 0: # Add the beginning of the input to the pattern_in_progress list
pattern_in_progress.append(i)
elif i == pattern_in_progress[-1]+1:
# If the integer we are currently looking at is the same as the previous number
# in pattern_in_progress +1, then we know the range will continue
pattern_in_progress.append(i)
else:
# If you're here than we found an integer that is not the previous integer + 1
if pattern_in_progress[0] == pattern_in_progress[-1]: # Case for two "rangeless" integers in order
# ex: 1, 2, 3, 14, 25, 30, 31, 32
pattern_in_progress = [i]
continue # ends this iteration of the loop
# The fact that we have gotten this far means we are ready to add the range to the patterns_found list
patterns_found.append("{num1}->{num2}".format(num1=pattern_in_progress[0], num2=pattern_in_progress[-1]))
pattern_in_progress = [i]
if len(pattern_in_progress) > 1:
# This if statement ensures that a range that was in progress when the loop ended is added to
# the patterns_found list.
patterns_found.append("{num1}->{num2}".format(num1=pattern_in_progress[0], num2=pattern_in_progress[-1]))
return patterns_found
if __name__ == '__main__':
    # assumes space-separated integers on stdin, e.g. "1 2 3 14 25 30 31 32" -> ['1->3', '30->32']
    print(ranges([int(x) for x in input().split()]))
bentilly/heroes | lib/werkzeug/contrib/limiter.py | 295 | 1333 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
Trac_ or Django_ because those directly stream into the memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
"""Limits the input stream to a given number of bytes. This is useful if
you have a WSGI application that reads form data into memory (django for
example) and you don't want users to harm the server by uploading tons of
data.
Default is 10MB
.. versionchanged:: 0.9
Deprecated middleware.
"""
def __init__(self, app, maximum_size=1024 * 1024 * 10):
warn(DeprecationWarning('This middleware is deprecated'))
self.app = app
self.maximum_size = maximum_size
def __call__(self, environ, start_response):
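        # never read more than the declared Content-Length, capped at the
        # configured maximum_size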
limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
return self.app(environ, start_response)
| apache-2.0 | -5,936,152,903,061,632,000 | 32.325 | 79 | 0.662416 | false |
vrenkens/Nabu-asr | nabu/neuralnetworks/components/layer.py | 2 | 3940 | '''@file layer.py
Neural network layers '''
import tensorflow as tf
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn
from nabu.neuralnetworks.components import ops
def blstm(
inputs,
sequence_length,
num_units,
layer_norm=False,
scope=None):
'''
a BLSTM layer
args:
inputs: the input to the layer as a
[batch_size, max_length, dim] tensor
sequence_length: the length of the input sequences as a
[batch_size] tensor
num_units: The number of units in the one directon
layer_norm: whether layer normalization should be applied
scope: The variable scope sets the namespace under which
the variables created during this call will be stored.
returns:
the blstm outputs
'''
with tf.variable_scope(scope or 'BLSTM'):
#create the lstm cell that will be used for the forward and backward
#pass
lstm_cell_fw = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units=num_units,
layer_norm=layer_norm,
reuse=tf.get_variable_scope().reuse)
lstm_cell_bw = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units,
layer_norm=layer_norm,
reuse=tf.get_variable_scope().reuse)
#do the forward computation
outputs_tupple, _ = bidirectional_dynamic_rnn(
lstm_cell_fw, lstm_cell_bw, inputs, dtype=tf.float32,
sequence_length=sequence_length)
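        # concatenate the forward and backward outputs along the feature
        # dimension, giving a [batch_size, max_length, 2*num_units] tensor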
outputs = tf.concat(outputs_tupple, 2)
return outputs
def pblstm(
inputs,
sequence_length,
num_units,
num_steps=2,
layer_norm=False,
scope=None):
'''
a Pyramidal BLSTM layer
args:
inputs: the input to the layer as a
[batch_size, max_length, dim] tensor
sequence_length: the length of the input sequences as a
[batch_size] tensor
num_units: The number of units in the one directon
num_steps: the number of time steps to concatenate
layer_norm: whether layer normalization should be applied
scope: The variable scope sets the namespace under which
the variables created during this call will be stored.
returns:
- the PBLSTM outputs
- the new sequence lengths
'''
with tf.variable_scope(scope or 'PBLSTM'):
#apply blstm layer
outputs = blstm(
inputs=inputs,
sequence_length=sequence_length,
num_units=num_units,
layer_norm=layer_norm
)
#stack the outputs
outputs, output_seq_lengths = ops.pyramid_stack(
outputs,
sequence_length,
num_steps)
return outputs, output_seq_lengths
def projected_subsampling(inputs, input_seq_lengths, num_steps, name=None):
'''
    apply projected subsampling: concatenate num_steps timesteps and project
    the stacked features back to the input dimensionality with a linear layer
    args:
        inputs: a [batch_size x max_length x dim] input tensor
        input_seq_lengths: the input sequence lengths as a [batch_size] vector
        num_steps: the number of steps to concatenate
        name: the name of the operation
    returns:
        - a [batch_size x ceil(max_length/num_steps) x dim] output tensor
- the output sequence lengths as a [batch_size] vector
'''
with tf.variable_scope(name or 'subsampling'):
input_dim = int(inputs.get_shape()[2])
#concatenate 2 timesteps
stacked_inputs, output_seq_lengths = ops.pyramid_stack(
inputs,
input_seq_lengths,
num_steps)
#project back to the input dimension
outputs = tf.contrib.layers.linear(stacked_inputs, input_dim)
return outputs, output_seq_lengths
| mit | 8,420,736,518,116,136,000 | 30.269841 | 78 | 0.621574 | false |
dohoangkhiem/ansible-modules-extras | cloud/rackspace/rax_mon_entity.py | 123 | 6171 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_mon_entity
short_description: Create or delete a Rackspace Cloud Monitoring entity
description:
- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
to monitor. Entities associate checks and alarms with a target system and
provide a convenient, centralized place to store IP addresses. Rackspace
monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
label:
description:
- Defines a name for this entity. Must be a non-empty string between 1 and
255 characters long.
required: true
state:
description:
- Ensure that an entity with this C(name) exists or does not exist.
choices: ["present", "absent"]
agent_id:
description:
- Rackspace monitoring agent on the target device to which this entity is
bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
named_ip_addresses:
description:
- Hash of IP addresses that may be referenced by name by rax_mon_checks
added to this entity. Must be a dictionary of with keys that are names
between 1 and 64 characters long, and values that are valid IPv4 or IPv6
addresses.
metadata:
description:
- Hash of arbitrary C(name), C(value) pairs that are passed to associated
rax_mon_alarms. Names and values must all be between 1 and 255 characters
long.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Entity example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Ensure an entity exists
rax_mon_entity:
credentials: ~/.rax_pub
state: present
label: my_entity
named_ip_addresses:
web_box: 192.168.0.10
db_box: 192.168.0.11
meta:
hurf: durf
register: the_entity
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
metadata):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for entity in cm.list_entities():
if label == entity.label:
existing.append(entity)
entity = None
if existing:
entity = existing[0]
if state == 'present':
should_update = False
should_delete = False
should_create = False
if len(existing) > 1:
module.fail_json(msg='%s existing entities have the label %s.' %
(len(existing), label))
if entity:
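            # a change in the named ip addresses is handled by deleting and
            # recreating the entity; agent_id and metadata changes are
            # applied in place with update()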
if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
should_delete = should_create = True
# Change an existing Entity, unless there's nothing to do.
should_update = agent_id and agent_id != entity.agent_id or \
(metadata and metadata != entity.metadata)
if should_update and not should_delete:
entity.update(agent_id, metadata)
changed = True
if should_delete:
entity.delete()
else:
should_create = True
if should_create:
# Create a new Entity.
entity = cm.create_entity(label=label, agent=agent_id,
ip_addresses=named_ip_addresses,
metadata=metadata)
changed = True
else:
# Delete the existing Entities.
for e in existing:
e.delete()
changed = True
if entity:
entity_dict = {
"id": entity.id,
"name": entity.name,
"agent_id": entity.agent_id,
}
module.exit_json(changed=changed, entity=entity_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
agent_id=dict(),
named_ip_addresses=dict(type='dict', default={}),
metadata=dict(type='dict', default={})
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
agent_id = module.params.get('agent_id')
named_ip_addresses = module.params.get('named_ip_addresses')
metadata = module.params.get('metadata')
setup_rax_module(module, pyrax)
cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
# Import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# Invoke the module.
main()
| gpl-3.0 | -6,354,907,272,350,982,000 | 31.140625 | 82 | 0.634905 | false |
marcoscaceres/AlarmAPI | node_modules/grunt-contrib-uglify/node_modules/gzip-js/test/zipTest.py | 182 | 1836 | import os
from helpers import run_cmd
from colorama import Fore
defaultTestDir = 'test-files'
defaultOutDir = 'test-outs'
"""
Run a single test
@param tFile- required; the full path to the file to run
@param level- optional (default: all); the compression level [1-9]
@return True if all tests passed; False if at least one test failed
"""
def runTest(tFile, level=None, outDir=defaultOutDir):
passed = True
if level == None:
for x in range(1, 10):
if runTest(tFile, x, outDir) == False:
passed = False
return passed
out1 = os.path.join(outDir, '%(file)s.%(level)d.gz' % {'file': os.path.basename(tFile), 'level' : level})
out2 = os.path.join(outDir, '%(file)s.%(level)d.out.gz' % {'file': os.path.basename(tFile), 'level' : level})
run_cmd('gzip -c -%(level)d %(file)s > %(outfile)s' % {'level' : level, 'file' : tFile, 'outfile' : out1})
run_cmd('../bin/gzip.js --level %(level)d --file %(file)s --output %(output)s' % {'level' : level, 'file' : tFile, 'output' : out2})
result = run_cmd('diff %(file1)s %(file2)s' % {'file1' : out1, 'file2' : out2})
if result['returncode'] == 0:
status = Fore.GREEN + 'PASSED' + Fore.RESET
else:
passed = False
status = Fore.RED + 'FAILED' + Fore.RESET
print 'Level %(level)d: %(status)s' % {'level' : level, 'status' : status}
return passed
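# Example usage (a sketch; it assumes the system gzip and ../bin/gzip.js are
# runnable and that the default 'test-outs' output directory exists):
#   runTest('test-files/sample.txt', level=6)   # compare a single level
#   runTest('test-files/sample.txt')            # compare all levels 1-9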
"""
Runs all tests on the given level. This iterates through the testDir directory defined above.
@param level- The level to run on [1-9] (default: None, runs on all levels)
@return True if all levels passed, False if at least one failed
"""
def runAll(level=None, testDir=defaultTestDir):
passed = True
for tFile in os.listdir(testDir):
fullPath = os.path.join(testDir, tFile)
print Fore.YELLOW + tFile + Fore.RESET
if runTest(fullPath, level) == False:
passed = False
print ''
return passed
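# Minimal optional entry point (an assumption about how the script is invoked;
# it relies only on runAll() defined above and exits non-zero on any failure):
if __name__ == '__main__':
    import sys
    sys.exit(0 if runAll() else 1)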
| mit | 7,189,911,731,008,144,000 | 29.6 | 133 | 0.665577 | false |
AICP/external_chromium_org | build/android/pylib/instrumentation/test_jar.py | 11 | 9038 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper class for instrumenation test jar."""
# pylint: disable=W0702
import collections
import logging
import os
import pickle
import re
import sys
from pylib import cmd_helper
from pylib import constants
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT,
'build', 'util', 'lib', 'common'))
import unittest_util # pylint: disable=F0401
# If you change the cached output of proguard, increment this number
PICKLE_FORMAT_VERSION = 1
class TestJar(object):
_ANNOTATIONS = frozenset(
['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', 'EnormousTest',
'FlakyTest', 'DisabledTest', 'Manual', 'PerfTest', 'HostDrivenTest'])
_DEFAULT_ANNOTATION = 'SmallTest'
_PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$')
_PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$')
_PROGUARD_ANNOTATION_RE = re.compile(r'\s*?- Annotation \[L(\S*);\]:$')
_PROGUARD_ANNOTATION_CONST_RE = (
re.compile(r'\s*?- Constant element value.*$'))
_PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'\s*?- \S+? \[(.*)\]$')
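  # Illustrative fragments of `proguard -dump` output that the patterns above
  # are written against (an approximation; real output differs by version):
  #   - Program class: org/chromium/content/browser/ContentViewTest
  #   - Method:        testFoo()V
  #   - Annotation [Lorg/chromium/base/test/util/Feature;]:
  #     - Constant element value [value '13']
  #       - Utf8 [Navigation]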
def __init__(self, jar_path):
if not os.path.exists(jar_path):
raise Exception('%s not found, please build it' % jar_path)
self._PROGUARD_PATH = os.path.join(constants.ANDROID_SDK_ROOT,
'tools/proguard/lib/proguard.jar')
if not os.path.exists(self._PROGUARD_PATH):
self._PROGUARD_PATH = os.path.join(os.environ['ANDROID_BUILD_TOP'],
'external/proguard/lib/proguard.jar')
self._jar_path = jar_path
self._annotation_map = collections.defaultdict(list)
self._pickled_proguard_name = self._jar_path + '-proguard.pickle'
self._test_methods = []
if not self._GetCachedProguardData():
self._GetProguardData()
def _GetCachedProguardData(self):
if (os.path.exists(self._pickled_proguard_name) and
(os.path.getmtime(self._pickled_proguard_name) >
os.path.getmtime(self._jar_path))):
logging.info('Loading cached proguard output from %s',
self._pickled_proguard_name)
try:
with open(self._pickled_proguard_name, 'r') as r:
d = pickle.loads(r.read())
if d['VERSION'] == PICKLE_FORMAT_VERSION:
self._annotation_map = d['ANNOTATION_MAP']
self._test_methods = d['TEST_METHODS']
return True
except:
logging.warning('PICKLE_FORMAT_VERSION has changed, ignoring cache')
return False
def _GetProguardData(self):
proguard_output = cmd_helper.GetCmdOutput(['java', '-jar',
self._PROGUARD_PATH,
'-injars', self._jar_path,
'-dontshrink',
'-dontoptimize',
'-dontobfuscate',
'-dontpreverify',
'-dump',
]).split('\n')
clazz = None
method = None
annotation = None
has_value = False
qualified_method = None
for line in proguard_output:
m = self._PROGUARD_CLASS_RE.match(line)
if m:
clazz = m.group(1).replace('/', '.') # Change package delim.
annotation = None
continue
m = self._PROGUARD_METHOD_RE.match(line)
if m:
method = m.group(1)
annotation = None
qualified_method = clazz + '#' + method
if method.startswith('test') and clazz.endswith('Test'):
self._test_methods += [qualified_method]
continue
if not qualified_method:
# Ignore non-method annotations.
continue
m = self._PROGUARD_ANNOTATION_RE.match(line)
if m:
annotation = m.group(1).split('/')[-1] # Ignore the annotation package.
self._annotation_map[qualified_method].append(annotation)
has_value = False
continue
if annotation:
if not has_value:
m = self._PROGUARD_ANNOTATION_CONST_RE.match(line)
if m:
has_value = True
else:
m = self._PROGUARD_ANNOTATION_VALUE_RE.match(line)
if m:
value = m.group(1)
self._annotation_map[qualified_method].append(
annotation + ':' + value)
has_value = False
logging.info('Storing proguard output to %s', self._pickled_proguard_name)
d = {'VERSION': PICKLE_FORMAT_VERSION,
'ANNOTATION_MAP': self._annotation_map,
'TEST_METHODS': self._test_methods}
with open(self._pickled_proguard_name, 'w') as f:
f.write(pickle.dumps(d))
def _GetAnnotationMap(self):
return self._annotation_map
@staticmethod
def _IsTestMethod(test):
class_name, method = test.split('#')
return class_name.endswith('Test') and method.startswith('test')
def GetTestAnnotations(self, test):
"""Returns a list of all annotations for the given |test|. May be empty."""
if not self._IsTestMethod(test):
return []
return self._GetAnnotationMap()[test]
@staticmethod
def _AnnotationsMatchFilters(annotation_filter_list, annotations):
"""Checks if annotations match any of the filters."""
if not annotation_filter_list:
return True
for annotation_filter in annotation_filter_list:
filters = annotation_filter.split('=')
if len(filters) == 2:
key = filters[0]
value_list = filters[1].split(',')
for value in value_list:
if key + ':' + value in annotations:
return True
elif annotation_filter in annotations:
return True
return False
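  # A hedged illustration of the filter grammar handled by
  # _AnnotationsMatchFilters above:
  #   _AnnotationsMatchFilters(['LargeTest'], ['LargeTest'])          -> True
  #   _AnnotationsMatchFilters(['Feature=Sync,Bookmarks'],
  #                            ['Feature:Sync', 'MediumTest'])        -> True
  #   _AnnotationsMatchFilters(['SmallTest'], ['LargeTest'])          -> False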
def GetAnnotatedTests(self, annotation_filter_list):
"""Returns a list of all tests that match the given annotation filters."""
return [test for test, annotations in self._GetAnnotationMap().iteritems()
if self._IsTestMethod(test) and self._AnnotationsMatchFilters(
annotation_filter_list, annotations)]
def GetTestMethods(self):
"""Returns a list of all test methods in this apk as Class#testMethod."""
return self._test_methods
def _GetTestsMissingAnnotation(self):
"""Get a list of test methods with no known annotations."""
tests_missing_annotations = []
for test_method in self.GetTestMethods():
annotations_ = frozenset(self.GetTestAnnotations(test_method))
if (annotations_.isdisjoint(self._ANNOTATIONS) and
not self.IsHostDrivenTest(test_method)):
tests_missing_annotations.append(test_method)
return sorted(tests_missing_annotations)
def GetAllMatchingTests(self, annotation_filter_list,
exclude_annotation_list, test_filter):
"""Get a list of tests matching any of the annotations and the filter.
Args:
annotation_filter_list: List of test annotations. A test must have at
least one of these annotations. A test without any annotations is
considered to be SmallTest.
exclude_annotation_list: List of test annotations. A test must not have
any of these annotations.
test_filter: Filter used for partial matching on the test method names.
Returns:
List of all matching tests.
"""
if annotation_filter_list:
available_tests = self.GetAnnotatedTests(annotation_filter_list)
# Include un-annotated tests in SmallTest.
if annotation_filter_list.count(self._DEFAULT_ANNOTATION) > 0:
for test in self._GetTestsMissingAnnotation():
logging.warning(
'%s has no annotations. Assuming "%s".', test,
self._DEFAULT_ANNOTATION)
available_tests.append(test)
if exclude_annotation_list:
excluded_tests = self.GetAnnotatedTests(exclude_annotation_list)
available_tests = list(set(available_tests) - set(excluded_tests))
else:
available_tests = [m for m in self.GetTestMethods()
if not self.IsHostDrivenTest(m)]
tests = []
if test_filter:
# |available_tests| are in adb instrument format: package.path.class#test.
# Maps a 'class.test' name to each 'package.path.class#test' name.
sanitized_test_names = dict([
(t.split('.')[-1].replace('#', '.'), t) for t in available_tests])
# Filters 'class.test' names and populates |tests| with the corresponding
# 'package.path.class#test' names.
tests = [
sanitized_test_names[t] for t in unittest_util.FilterTestNames(
sanitized_test_names.keys(), test_filter.replace('#', '.'))]
else:
tests = available_tests
return tests
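  # Typical call (a sketch; the annotation names and test filter are only
  # examples of the formats described in the docstring above):
  #   jar.GetAllMatchingTests(['SmallTest', 'MediumTest'], ['FlakyTest'],
  #                           'ContentViewTest.testAddJavascriptInterface')
  # which would yield adb-instrument style names such as
  #   'org.chromium.content.browser.ContentViewTest#testAddJavascriptInterface'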
@staticmethod
def IsHostDrivenTest(test):
return 'pythonDrivenTests' in test
| bsd-3-clause | -1,017,126,872,567,058,200 | 37.459574 | 80 | 0.610201 | false |
carloshwa/apps-android-wikipedia | scripts/make-templates.py | 2 | 6090 | #!/usr/bin/env python2
# coding=utf-8
import copy
import os
import json
import unicodecsv as csv
import codecs
from urllib2 import urlopen
from jinja2 import Environment, FileSystemLoader
CHINESE_WIKI_LANG = "zh"
SIMPLIFIED_CHINESE_LANG = "zh-hans"
TRADITIONAL_CHINESE_LANG = "zh-hant"
# Wikis that cause problems and hence we pretend
# they do not exist.
# - "got" -> Gothic runes wiki. The name of got in got
# contains characters outside the Unicode BMP. Android
# hard crashes on these. Let's ignore these fellas
# for now.
OSTRICH_WIKIS = [u"got"]
# Represents a single wiki, along with arbitrary properties of that wiki
# Simple data container object
class Wiki(object):
def __init__(self, lang):
self.lang = lang
self.props = {}
# Represents a list of wikis plus their properties.
# Encapsulates rendering code as well
class WikiList(object):
def __init__(self, wikis):
self.wikis = wikis
self.template_env = Environment(loader=FileSystemLoader(
os.path.join(os.path.dirname(os.path.realpath(__file__)), u"templates")
))
def render(self, template, class_name, **kwargs):
data = {
u"class_name": class_name,
u"wikis": self.wikis
}
data.update(kwargs)
rendered = self.template_env.get_template(template).render(**data)
out = codecs.open(class_name + u".java", u"w", u"utf-8")
out.write(rendered)
out.close()
def build_wiki(lang, english_name, local_name, total_pages=0):
wiki = Wiki(lang)
wiki.props["english_name"] = english_name
wiki.props["local_name"] = local_name
wiki.props["total_pages"] = total_pages
return wiki
def list_from_wikistats():
URL = u"https://wikistats.wmflabs.org/api.php?action=dump&table=wikipedias&format=csv&s=good"
print(u"Fetching languages")
data = csv.reader(urlopen(URL))
wikis = []
is_first = True
for row in data:
if is_first:
is_first = False
continue # skip headers
wiki = build_wiki(lang=row[2], english_name=row[1], local_name=row[10],
total_pages=row[3])
wikis.append(wiki)
return wikis
# Remove unsupported wikis.
def filter_supported_wikis(wikis):
return [wiki for wiki in wikis if wiki.lang not in OSTRICH_WIKIS]
# Apply manual tweaks to the list of wikis before they're populated.
def preprocess_wikis(wikis):
# Add TestWiki.
wikis.append(build_wiki(lang="test", english_name="Test", local_name="Test",
total_pages=0))
return wikis
# Apply manual tweaks to the list of wikis after they're populated.
def postprocess_wikis(wiki_list):
# Add Simplified and Traditional Chinese dialects.
chineseWiki = next((wiki for wiki in wiki_list.wikis if wiki.lang == CHINESE_WIKI_LANG), None)
chineseWikiIndex = wiki_list.wikis.index(chineseWiki)
simplifiedWiki = copy.deepcopy(chineseWiki)
simplifiedWiki.lang = SIMPLIFIED_CHINESE_LANG
simplifiedWiki.props["english_name"] = "Simplified Chinese"
simplifiedWiki.props["local_name"] = "简体"
wiki_list.wikis.insert(chineseWikiIndex + 1, simplifiedWiki)
traditionalWiki = copy.deepcopy(chineseWiki)
traditionalWiki.lang = TRADITIONAL_CHINESE_LANG
traditionalWiki.props["english_name"] = "Traditional Chinese"
traditionalWiki.props["local_name"] = "繁體"
wiki_list.wikis.insert(chineseWikiIndex + 2, traditionalWiki)
return wiki_list
# Populate the aliases for "Special:" and "File:" in all wikis
def populate_aliases(wikis):
for wiki in wikis.wikis:
print(u"Fetching Special Page and File alias for %s" % wiki.lang)
url = u"https://%s.wikipedia.org/w/api.php" % wiki.lang + \
u"?action=query&meta=siteinfo&format=json&siprop=namespaces"
data = json.load(urlopen(url))
# according to https://www.mediawiki.org/wiki/Manual:Namespace
# -1 seems to be the ID for Special Pages
wiki.props[u"special_alias"] = data[u"query"][u"namespaces"][u"-1"][u"*"]
# 6 is the ID for File pages
wiki.props[u"file_alias"] = data[u"query"][u"namespaces"][u"6"][u"*"]
return wikis
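# For reference, the slice of the siteinfo response consumed above looks
# roughly like this (an illustration, trimmed to the keys actually read):
#   {"query": {"namespaces": {"-1": {"*": "Special"}, "6": {"*": "File"}}}}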
# Populate the name of the main page in each wiki
def populate_main_pages(wikis):
for wiki in wikis.wikis:
print(u"Fetching Main Page for %s" % wiki.lang)
url = u"https://%s.wikipedia.org/w/api.php" % wiki.lang + \
u"?action=query&meta=siteinfo&format=json&siprop=general"
data = json.load(urlopen(url))
wiki.props[u"main_page_name"] = data[u"query"][u"general"][u"mainpage"]
return wikis
# Returns a function that renders a particular template when passed
# a WikiList object
def render_template(template, filename, **kwargs):
def _actual_render(wikis):
wikis.render(template, filename, **kwargs)
return wikis
return _actual_render
# Render things into a simple key value JSON dict
# Useful for the iOS side of things
def render_simple_json(key, filename):
def _actual_render(wikis):
data = dict([(wiki.lang, wiki.props[key]) for wiki in wikis.wikis])
out = codecs.open(filename, u"w", u"utf-8")
out.write(json.dumps(data))
out.close()
return wikis
return _actual_render
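# The emitted file is a flat lang -> value map, for example (values are
# illustrative; the real ones come from the live queries above):
#   {"en": "Special", "de": "Spezial", "test": "Special"}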
# Kinda like reduce(), but special-cases the first function, which is called with no arguments
def chain(*funcs):
res = funcs[0]()
for func in funcs[1:]:
res = func(res)
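# e.g. chain(make, f, g) evaluates g(f(make())); below it threads the wiki
# list from list_from_wikistats through each filter/populate/render step.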
chain(
list_from_wikistats,
filter_supported_wikis,
preprocess_wikis,
WikiList,
populate_aliases,
populate_main_pages,
postprocess_wikis,
render_template(u"basichash.java.jinja", u"SpecialAliasData", key=u"special_alias"),
render_template(u"basichash.java.jinja", u"FileAliasData", key=u"file_alias"),
render_simple_json(u"special_alias", u"specialalias.json"),
render_simple_json(u"file_alias", u"filealias.json"),
render_template(u"basichash.java.jinja", u"MainPageNameData", key=u"main_page_name"),
render_simple_json(u"main_page_name", u"mainpages.json")
)
| apache-2.0 | 6,847,877,917,796,116,000 | 31.875676 | 98 | 0.665735 | false |
shsingh/ansible | test/units/modules/network/fortios/test_fortios_log_syslogd_override_filter.py | 21 | 9346 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_log_syslogd_override_filter
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_log_syslogd_override_filter.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_log_syslogd_override_filter_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_override_filter': {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_override_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_log_syslogd_override_filter_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_override_filter': {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_override_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_log_syslogd_override_filter_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_override_filter': {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_override_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_log_syslogd_override_filter_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_syslogd_override_filter': {
'random_attribute_not_valid': 'tag',
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_11,',
'netscan_vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_syslogd_override_filter.fortios_log_syslogd(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dns': 'enable',
'filter': 'test_value_5',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_11,',
'netscan-vulnerability': 'test_value_12,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.syslogd', 'override-filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 | -488,594,555,072,151,900 | 35.65098 | 133 | 0.601755 | false |