code (string, 2–1.05M) | repo_name (string, 5–104) | path (string, 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2–1.05M)
---|---|---|---|---|---
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 18:31:59 2017
@author: katsuya.ishiyama
"""
from numpy import random
# Definition of module level constants
SUCCESS_CODE = 1
FAILURE_CODE = 0
class Strategy():
def __init__(self, n):
_success_probability = _generate_success_probability(n)
_strategy = {i: p for i, p in enumerate(_success_probability, 1)}
self._n = n
self.strategy = _strategy
self.stock_of_strategy = list(_strategy.keys())
self.tried_strategy = []
self.current_strategy = None
self.previous_strategy = None
self.count_same_strategy = 0
self._result_of_trial = None
def choose_strategy(self):
if not self.stock_of_strategy:
raise ValueError('There is no strategy in stock.')
_chosen_id = random.choice(self.stock_of_strategy, 1)[0]
self.previous_strategy = self.current_strategy
self.current_strategy = _chosen_id
self.count_same_strategy = 0
self.stock_of_strategy.remove(_chosen_id)
_chosen_strategy = {
'chosen_strategy': _chosen_id,
'success_probability': self._get_success_probability()
}
return _chosen_strategy
def _get_success_probability(self):
return self.strategy[self.current_strategy]
def try_strategy(self):
if self.current_strategy is None:
raise ValueError('No strategy is chosen.')
self.tried_strategy.append(self.current_strategy)
self._result_of_trial = _get_trial_result(
p=self._get_success_probability()
)
if self.current_strategy == self.previous_strategy:
self.count_same_strategy += 1
return self._result_of_trial
def _get_trial_result(p):
_trial_result = random.choice([FAILURE_CODE, SUCCESS_CODE], size=1, p=[1 - p, p])
return _trial_result[0]
def _generate_success_probability(size):
return random.sample(size)
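# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module): draw every strategy from the stock once and report each trial.
if __name__ == '__main__':
    strategy = Strategy(3)
    while strategy.stock_of_strategy:
        chosen = strategy.choose_strategy()
        result = strategy.try_strategy()
        print('strategy {chosen_strategy} (p={success_probability:.2f}) '
              '-> result {res}'.format(res=result, **chosen))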
| Katsuya-Ishiyama/simulation | strategy/strategy.py | Python | mit | 2,013 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015 by PyCLibrary Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT/X11 license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""
Used for extracting data such as macro definitions, variables, typedefs, and
function signatures from C header files.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
import sys
import re
import os
import logging
from inspect import cleandoc
from future.utils import istext, isbytes
from ast import literal_eval
from traceback import format_exc
from .errors import DefinitionError
from .utils import find_header
# Import parsing elements
from .thirdparty.pyparsing import \
(ParserElement, ParseResults, Forward, Optional, Word, WordStart,
WordEnd, Keyword, Regex, Literal, SkipTo, ZeroOrMore, OneOrMore,
Group, LineEnd, stringStart, quotedString, oneOf, nestedExpr,
delimitedList, restOfLine, cStyleComment, alphas, alphanums, hexnums,
lineno, Suppress)
ParserElement.enablePackrat()
logger = logging.getLogger(__name__)
__all__ = ['win_defs', 'CParser']
class Type(tuple):
"""
Representation of a C type. CParser uses this class to store the parsed
typedefs and the types of variable/func.
**ATTENTION:** Due to compatibility issues with 0.1.0 this class derives
from tuple and can be seen as the tuples from 0.1.0. In future this might
change to a tuple-like object!!!
Parameters
----------
type_spec : str
a string referring to the base type of this type definition. This may
either be a fundamental type (e.g. 'int', 'enum x') or a type definition
made by a typedef-statement
declarators : str or list of tuple
all following parameters are deriving a type from the type defined
until now. Types can be derived by:
- The string '*': define a pointer to the base type
(e.g. Type('int', '*'))
- The string '&': a reference. T.B.D.
- A list of integers of len 1: define an array with N elements
(N is the first and single entry in the list of integers). If N is
-1, the array definition is seen as 'int x[]'
(e.g. Type('int', [1]))
- an N-tuple of 3-tuples: defines a function of N parameters. Every
parameter is a 3-tuple of the form:
(<parameter-name-or-None>, <param-type>, None).
Due to compatibility reasons the return value of the function is
stored in the Type.type_spec parameter
(this is **not** the case for function pointers):
(e.g. Type(Type('int', '*'), (('param1', Type('int'), None),)))
type_quals : dict of int to list of str (optional)
this optional (keyword-)argument allows to optionally add type
qualifiers for every declarator level. The key 0 refers the type
qualifier of type_spec, while 1 refers to declarators[0], 2 refers to
declarators[1] and so on.
To build more complex types any number of declarators can be combined, e.g.
>>> int * (*a[2])(char *, signed c[]);
is represented as:
>>> Type('int', '*',
>>>      ((None, Type('char', '*'), None),
>>>       ('c', Type('signed', [-1]), None)),
>>>      '*', [2])
"""
# Cannot slot a subclass of tuple.
def __new__(cls, type_spec, *declarators, **argv):
return super(Type, cls).__new__(cls, (type_spec,) + declarators)
def __init__(self, type_spec, *declarators, **argv):
super(Type, self).__init__()
self.type_quals = (argv.pop('type_quals', None) or
((),) * (1 + len(declarators)))
if len(self.type_quals) != 1 + len(declarators):
raise ValueError("wrong number of type qualifiers")
assert len(argv) == 0, 'Invalid Parameter'
def __eq__(self, other):
if isinstance(other, Type):
if self.type_quals != other.type_quals:
return False
return super(Type, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
@property
def declarators(self):
"""Return a tuple of all declarators.
"""
return tuple(self[1:])
@property
def type_spec(self):
"""Return the base type of this type.
"""
return self[0]
def is_fund_type(self):
"""Returns True, if this type is a fundamental type.
Fundamental types are all types, that are not defined via typedef
"""
if (self[0].startswith('struct ') or self[0].startswith('union ') or
self[0].startswith('enum ')):
return True
names = (num_types + nonnum_types + size_modifiers + sign_modifiers +
extra_type_list)
for w in self[0].split():
if w not in names:
return False
return True
def eval(self, type_map, used=None):
"""Resolves the type_spec of this type recursively if it is referring
to a typedef. For resolving the type type_map is used for lookup.
Returns a new Type object.
Parameters
----------
type_map : dict of str to Type
All typedefs that shall be resolved have to be stored in this
type_map.
used : list of str
For internal use only to prevent circular typedefs
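Example
-------
A minimal sketch (assuming 'U32' was typedef'd to 'unsigned int'):
>>> Type('U32', '*').eval({'U32': Type('unsigned int')})
Type('unsigned int', '*')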
"""
used = used or []
if self.is_fund_type():
# Remove 'signed' before returning evaluated type
return Type(re.sub(r'\bsigned\b', '', self.type_spec).strip(),
*self.declarators,
type_quals=self.type_quals)
parent = self.type_spec
if parent in used:
m = 'Recursive loop while evaluating types. (typedefs are {})'
raise DefinitionError(m.format(' -> '.join(used+[parent])))
used.append(parent)
if parent not in type_map:
m = 'Unknown type "{}" (typedefs are {})'
raise DefinitionError(m.format(parent, ' -> '.join(used)))
pt = type_map[parent]
evaled_type = Type(pt.type_spec, *(pt.declarators + self.declarators),
type_quals=(pt.type_quals[:-1] +
(pt.type_quals[-1] +
self.type_quals[0],) +
self.type_quals[1:])
)
return evaled_type.eval(type_map, used)
def add_compatibility_hack(self):
"""If This Type is refering to a function (**not** a function pointer)
a new type is returned, that matches the hack from version 0.1.0.
This hack enforces the return value be encapsulated in a separated Type
object:
Type('int', '*', ())
is converted to
Type(Type('int', '*'), ())
"""
if type(self[-1]) == tuple:
return Type(Type(*self[:-1], type_quals=self.type_quals[:-1]),
self[-1],
type_quals=((), self.type_quals[-1]))
else:
return self
def remove_compatibility_hack(self):
"""Returns a Type object, where the hack from .add_compatibility_hack()
is removed
"""
if len(self) == 2 and isinstance(self[0], Type):
return Type(*(self[0] + (self[1],)))
else:
return self
def __repr__(self):
type_qual_str = ('' if not any(self.type_quals) else
', type_quals='+repr(self.type_quals))
return (type(self).__name__ + '(' +
', '.join(map(repr, self)) + type_qual_str + ')')
class Compound(dict):
"""Base class for representing object using a dict-like interface.
"""
__slots__ = ()
def __init__(self, *members, **argv):
members = list(members)
pack = argv.pop('pack', None)
assert len(argv) == 0
super(Compound, self).__init__(dict(members=members, pack=pack))
def __repr__(self):
packParam = ', pack='+repr(self.pack) if self.pack is not None else ''
return (type(self).__name__ + '(' +
', '.join(map(repr, self.members)) + packParam + ')')
@property
def members(self):
return self['members']
@property
def pack(self):
return self['pack']
class Struct(Compound):
"""Representation of a C struct. CParser uses this class to store the parsed
structs.
**ATTENTION:** Due to compatibility issues with 0.1.0 this class derives
from dict and can be seen as the dicts from 0.1.0. In future this might
change to a dict-like object!!!
"""
__slots__ = ()
class Union(Compound):
"""Representation of a C union. CParser uses this class to store the parsed
unions.
**ATTENTION:** Due to compatibility issues with 0.1.0 this class derives
from dict and can be seen as the dicts from 0.1.0. In future this might
change to a dict-like object!!!
"""
__slots__ = ()
class Enum(dict):
"""Representation of a C enum. CParser uses this class to store the parsed
enums.
**ATTENTION:** Due to compatibility issues with 0.1.0 this class derives
from dict and can be seen as the dicts from 0.1.0. In future this might
change to a dict-like object!!!
"""
__slots__ = ()
def __init__(self, **args):
super(Enum, self).__init__(args)
def __repr__(self):
return (type(self).__name__ + '(' +
', '.join(nm + '=' + repr(val)
for nm, val in sorted(self.items())) +
')')
def win_defs(version='800'):
"""Loads selection of windows headers included with PyCLibrary.
These definitions can either be accessed directly or included before
parsing another file like this:
>>> windefs = c_parser.win_defs()
>>> p = c_parser.CParser("headerFile.h", copy_from=windefs)
Definitions are pulled from a selection of header files included in Visual
Studio (possibly not legal to distribute? Who knows.), some of which have
been abridged because they take so long to parse.
Parameters
----------
version : unicode
Version of the MSVC to consider when parsing.
Returns
-------
parser : CParser
CParser containing all the info from the Windows headers.
"""
header_files = ['WinNt.h', 'WinDef.h', 'WinBase.h', 'BaseTsd.h',
'WTypes.h', 'WinUser.h']
if not CParser._init:
logger.warning('Automatic initialisation : OS is assumed to be win32')
from .init import auto_init
auto_init('win32')
d = os.path.dirname(__file__)
p = CParser(
[os.path.join(d, 'headers', h) for h in header_files],
macros={'_WIN32': '', '_MSC_VER': version, 'CONST': 'const',
'NO_STRICT': None, 'MS_WIN32': ''},
process_all=False
)
p.process_all(cache=os.path.join(d, 'headers', 'WinDefs.cache'))
return p
class CParser(object):
"""Class for parsing C code to extract variable, struct, enum, and function
declarations as well as preprocessor macros.
This is not a complete C parser; instead, it is meant to simplify the
process of extracting definitions from header files in the absence of a
complete build system. Many files will require some amount of manual
intervention to parse properly (see 'replace' and extra arguments)
Parameters
----------
files : str or iterable, optional
File or files which should be parsed.
copy_from : CParser or iterable of CParser, optional
CParser whose definitions should be included.
replace : dict, optional
Specify some string replacements to perform before parsing. Format is
{'searchStr': 'replaceStr', ...}
process_all : bool, optional
Flag indicating whether files should be parsed immediately. True by
default.
cache : unicode, optional
Path of the cache file from which to load definitions/to which save
definitions as parsing is an expensive operation.
kwargs :
Extra parameters may be used to specify the starting state of the
parser. For example, one could provide a set of missing type
declarations by types={'UINT': ('unsigned int'), 'STRING': ('char', 1)}
Similarly, preprocessor macros can be specified: macros={'WINAPI': ''}
Example
-------
Create parser object, load two files
>>> p = CParser(['header1.h', 'header2.h'])
Remove comments, preprocess, and search for declarations
>>> p.process_all()
Just to see what was successfully parsed from the files
>>> p.print_all()
Access parsed declarations
>>> all_values = p.defs['values']
>>> functionSignatures = p.defs['functions']
To see what was not successfully parsed
>>> unp = p.process_all(return_unparsed=True)
>>> for s in unp:
...     print(s)
"""
#: Increment every time cache structure or parsing changes to invalidate
#: old cache files.
cache_version = 1
#: Private flag indicating whether the parser has been initialised.
_init = False
def __init__(self, files=None, copy_from=None, replace=None,
process_all=True, cache=None, **kwargs):
if not self._init:
logger.warning('Automatic initialisation based on OS detection')
from .init import auto_init
auto_init()
# Holds all definitions
self.defs = {}
# Holds definitions grouped by the file they came from
self.file_defs = {}
# Description of the struct packing rules as defined by #pragma pack
self.pack_list = {}
self.init_opts = kwargs.copy()
self.init_opts['files'] = []
self.init_opts['replace'] = {}
self.data_list = ['types', 'variables', 'fnmacros', 'macros',
'structs', 'unions', 'enums', 'functions', 'values']
self.file_order = []
self.files = {}
if files is not None:
if istext(files) or isbytes(files):
files = [files]
for f in self.find_headers(files):
self.load_file(f, replace)
# Initialize empty definition lists
for k in self.data_list:
self.defs[k] = {}
# Holds translations from typedefs/structs/unions to fundamental types
self.compiled_types = {}
self.current_file = None
# Import extra arguments if specified
for t in kwargs:
for k in kwargs[t].keys():
self.add_def(t, k, kwargs[t][k])
# Import from other CParsers if specified
if copy_from is not None:
if not isinstance(copy_from, (list, tuple)):
copy_from = [copy_from]
for p in copy_from:
self.import_dict(p.file_defs)
if process_all:
self.process_all(cache=cache)
def process_all(self, cache=None, return_unparsed=False,
print_after_preprocess=False):
""" Remove comments, preprocess, and parse declarations from all files.
This operates in memory, and thus does not alter the original files.
Parameters
----------
cache : unicode, optional
File path where cached results are stored or retrieved. The
cache is automatically invalidated if any of the arguments to
__init__ are changed, or if the C files are newer than the cache.
return_unparsed : bool, optional
Passed directly to parse_defs.
print_after_preprocess : bool, optional
If true prints the result of preprocessing each file.
Returns
-------
results : list
List of the results from parse_defs.
"""
if cache is not None and self.load_cache(cache, check_validity=True):
logger.debug("Loaded cached definitions; will skip parsing.")
# Cached values loaded successfully, nothing left to do here
return
results = []
logger.debug(cleandoc('''Parsing C header files (no valid cache found).
This could take several minutes...'''))
for f in self.file_order:
if self.files[f] is None:
# This means the file could not be loaded and there was no
# cache.
mess = 'Could not find header file "{}" or a cache file.'
raise IOError(mess.format(f))
logger.debug("Removing comments from file '{}'...".format(f))
self.remove_comments(f)
logger.debug("Preprocessing file '{}'...".format(f))
self.preprocess(f)
if print_after_preprocess:
print("===== PREPROCSSED {} =======".format(f))
print(self.files[f])
logger.debug("Parsing definitions in file '{}'...".format(f))
results.append(self.parse_defs(f, return_unparsed))
if cache is not None:
logger.debug("Writing cache file '{}'".format(cache))
self.write_cache(cache)
return results
def load_cache(self, cache_file, check_validity=False):
"""Load a cache file.
Used internally if cache is specified in process_all().
Parameters
----------
cache_file : unicode
Path of the file from which the cache should be loaded.
check_validity : bool, optional
If True, then run several checks before loading the cache:
- cache file must not be older than any source files
- cache file must not be older than this library file
- options recorded in cache must match options used to initialize
CParser
Returns
-------
result : bool
Whether the loading succeeded.
"""
# Make sure cache file exists
if not istext(cache_file):
raise ValueError("Cache file option must be a unicode.")
if not os.path.isfile(cache_file):
# If file doesn't exist, search for it in this module's path
d = os.path.dirname(__file__)
cache_file = os.path.join(d, "headers", cache_file)
if not os.path.isfile(cache_file):
logger.debug("Can't find requested cache file.")
return False
# Make sure cache is newer than all input files
if check_validity:
mtime = os.stat(cache_file).st_mtime
for f in self.file_order:
# If file does not exist, then it does not count against the
# validity of the cache.
if os.path.isfile(f) and os.stat(f).st_mtime > mtime:
logger.debug("Cache file is out of date.")
return False
try:
# Read cache file
import pickle
cache = pickle.load(open(cache_file, 'rb'))
# Make sure __init__ options match
if check_validity:
if cache['opts'] != self.init_opts:
db = logger.debug
db("Cache file is not valid")
db("It was created using different initialization options")
db('{}'.format(cache['opts']))
db('{}'.format(self.init_opts))
return False
else:
logger.debug("Cache init opts are OK:")
logger.debug('{}'.format(cache['opts']))
if cache['version'] < self.cache_version:
mess = "Cache file is not valid--cache format has changed."
logger.debug(mess)
return False
# Import all parse results
self.import_dict(cache['file_defs'])
return True
except Exception:
logger.exception("Warning--cache read failed:")
return False
def import_dict(self, data):
"""Import definitions from a dictionary.
The dict format should be the same as CParser.file_defs.
Used internally; does not need to be called manually.
"""
for f in data.keys():
self.current_file = f
for k in self.data_list:
for n in data[f][k]:
self.add_def(k, n, data[f][k][n])
def write_cache(self, cache_file):
"""Store all parsed declarations to cache. Used internally.
"""
cache = {}
cache['opts'] = self.init_opts
cache['file_defs'] = self.file_defs
cache['version'] = self.cache_version
import pickle
pickle.dump(cache, open(cache_file, 'wb'))
def find_headers(self, headers):
"""Try to find the specified headers.
"""
hs = []
for header in headers:
if os.path.isfile(header):
hs.append(header)
else:
h = find_header(header)
if not h:
raise OSError('Cannot find header: {}'.format(header))
hs.append(h)
return hs
def load_file(self, path, replace=None):
"""Read a file, make replacements if requested.
Called by __init__, should not be called manually.
Parameters
----------
path : unicode
Path of the file to load.
replace : dict, optional
Dictionary containing strings to replace by the associated value
when loading the file.
"""
if not os.path.isfile(path):
# Not a fatal error since we might be able to function properly if
# there is a cache file.
mess = "Warning: C header '{}' is missing, this may cause trouble."
logger.warning(mess.format(path))
self.files[path] = None
return False
# U causes all newline types to be converted to \n
with open(path, 'rU') as fd:
self.files[path] = fd.read()
if replace is not None:
for s in replace:
self.files[path] = re.sub(s, replace[s], self.files[path])
self.file_order.append(path)
bn = os.path.basename(path)
self.init_opts['replace'][bn] = replace
# Only interested in the file names, the directory may change between
# systems.
self.init_opts['files'].append(bn)
return True
def print_all(self, filename=None):
"""Print everything parsed from files. Useful for debugging.
Parameters
----------
filename : unicode, optional
Name of the file whose definition should be printed.
"""
from pprint import pprint
for k in self.data_list:
print("============== {} ==================".format(k))
if filename is None:
pprint(self.defs[k])
else:
pprint(self.file_defs[filename][k])
# =========================================================================
# --- Processing functions
# =========================================================================
def remove_comments(self, path):
"""Remove all comments from file.
Operates in memory, does not alter the original files.
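For example, 'x = 1; // count' loses its line comment, while a string
literal such as "// not a comment" stays untouched.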
"""
text = self.files[path]
cplusplus_line_comment = Literal("//") + restOfLine
# match quoted strings first to prevent matching comments inside quotes
comment_remover = (quotedString | cStyleComment.suppress() |
cplusplus_line_comment.suppress())
self.files[path] = comment_remover.transformString(text)
# --- Pre processing
def preprocess(self, path):
"""Scan named file for preprocessor directives, removing them while
expanding macros.
Operates in memory, does not alter the original files.
Currently supported:
- conditionals : ifdef, ifndef, if, elif, else (defined can be used
in an if statement).
- definition : define, undef
- pragmas : pragma
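For example (illustrative): given '#define N 3' followed by 'int a[N];',
the define line is consumed and the code line becomes 'int a[3];'.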
"""
# We need this so that eval_expr works properly
self.build_parser()
self.current_file = path
# Stack for #pragma pack push/pop
pack_stack = [(None, None)]
self.pack_list[path] = [(0, None)]
packing = None # Current packing value
text = self.files[path]
# First join together lines split by \\n
text = Literal('\\\n').suppress().transformString(text)
# Define the structure of a macro definition
name = Word(alphas+'_', alphanums+'_')('name')
deli_list = Optional(lparen + delimitedList(name) + rparen)
self.pp_define = (name.setWhitespaceChars(' \t')("macro") +
deli_list.setWhitespaceChars(' \t')('args') +
SkipTo(LineEnd())('value'))
self.pp_define.setParseAction(self.process_macro_defn)
# Comb through lines, process all directives
lines = text.split('\n')
result = []
directive = re.compile(r'\s*#\s*([a-zA-Z]+)(.*)$')
if_true = [True]
if_hit = []
for i, line in enumerate(lines):
new_line = ''
m = directive.match(line)
# Regular code line
if m is None:
# Only include if we are inside the correct section of an IF
# block
if if_true[-1]:
new_line = self.expand_macros(line)
# Macro line
else:
d = m.groups()[0]
rest = m.groups()[1]
if d == 'ifdef':
d = 'if'
rest = 'defined ' + rest
elif d == 'ifndef':
d = 'if'
rest = '!defined ' + rest
# Evaluate 'defined' operator before expanding macros
if d in ['if', 'elif']:
def pa(t):
is_macro = t['name'] in self.defs['macros']
is_macro_func = t['name'] in self.defs['fnmacros']
return ['0', '1'][is_macro or is_macro_func]
rest = (Keyword('defined') +
(name | lparen + name + rparen)
).setParseAction(pa).transformString(rest)
elif d in ['define', 'undef']:
match = re.match(r'\s*([a-zA-Z_][a-zA-Z0-9_]*)(.*)$', rest)
macroName, rest = match.groups()
# Expand macros if needed
if rest is not None and (all(if_true) or d in ['if', 'elif']):
rest = self.expand_macros(rest)
if d == 'elif':
if if_hit[-1] or not all(if_true[:-1]):
ev = False
else:
ev = self.eval_preprocessor_expr(rest)
logger.debug(" "*(len(if_true)-2) + line +
'{}, {}'.format(rest, ev))
if_true[-1] = ev
if_hit[-1] = if_hit[-1] or ev
elif d == 'else':
logger.debug(" "*(len(if_true)-2) + line +
'{}'.format(not if_hit[-1]))
if_true[-1] = (not if_hit[-1]) and all(if_true[:-1])
if_hit[-1] = True
elif d == 'endif':
if_true.pop()
if_hit.pop()
logger.debug(" "*(len(if_true)-1) + line)
elif d == 'if':
if all(if_true):
ev = self.eval_preprocessor_expr(rest)
else:
ev = False
logger.debug(" "*(len(if_true)-1) + line +
'{}, {}'.format(rest, ev))
if_true.append(ev)
if_hit.append(ev)
elif d == 'define':
if not if_true[-1]:
continue
logger.debug(" "*(len(if_true)-1) + "define: " +
'{}, {}'.format(macroName, rest))
try:
# Macro is registered here
self.pp_define.parseString(macroName + ' ' + rest)
except Exception:
logger.exception("Error processing macro definition:" +
'{}, {}'.format(macroName, rest))
elif d == 'undef':
if not if_true[-1]:
continue
try:
self.rem_def('macros', macroName.strip())
except Exception:
if sys.exc_info()[0] is not KeyError:
mess = "Error removing macro definition '{}'"
logger.exception(mess.format(macroName.strip()))
# Check for changes in structure packing
# Support only for #pragma pack (with all its variants
# save show), None is used to signal that the default packing
# is used.
# These two definitions disagree:
# https://gcc.gnu.org/onlinedocs/gcc/Structure-Packing-Pragmas.html
# http://msdn.microsoft.com/fr-fr/library/2e70t5y1.aspx
# The current implementation follows the MSVC doc.
elif d == 'pragma':
if not if_true[-1]:
continue
m = re.match(r'\s+pack\s*\(([^\)]*)\)', rest)
if not m:
continue
if m.groups():
opts = [s.strip() for s in m.groups()[0].split(',')]
pushpop = id = val = None
for o in opts:
if o in ['push', 'pop']:
pushpop = o
elif o.isdigit():
val = int(o)
else:
id = o
packing = val
if pushpop == 'push':
pack_stack.append((packing, id))
elif opts[0] == 'pop':
if id is None:
pack_stack.pop()
else:
ind = None
for j, s in enumerate(pack_stack):
if s[1] == id:
ind = j
break
if ind is not None:
pack_stack = pack_stack[:ind]
if val is None:
packing = pack_stack[-1][0]
mess = ">> Packing changed to {} at line {}"
logger.debug(mess.format(str(packing), i))
self.pack_list[path].append((i, packing))
else:
# Ignore any other directives
mess = 'Ignored directive {} at line {}'
logger.debug(mess.format(d, i))
result.append(new_line)
self.files[path] = '\n'.join(result)
def eval_preprocessor_expr(self, expr):
# Make a few alterations so the expression can be eval'd
macro_diffs = (
Literal('!').setParseAction(lambda: ' not ') |
Literal('&&').setParseAction(lambda: ' and ') |
Literal('||').setParseAction(lambda: ' or ') |
Word(alphas + '_', alphanums + '_').setParseAction(lambda: '0'))
expr2 = macro_diffs.transformString(expr).strip()
try:
ev = bool(eval(expr2))
except Exception:
mess = "Error evaluating preprocessor expression: {} [{}]\n{}"
logger.debug(mess.format(expr, repr(expr2), format_exc()))
ev = False
return ev
def process_macro_defn(self, t):
"""Parse a #define macro and register the definition.
"""
logger.debug("Processing MACRO: {}".format(t))
macro_val = t.value.strip()
if macro_val in self.defs['fnmacros']:
self.add_def('fnmacros', t.macro, self.defs['fnmacros'][macro_val])
logger.debug(" Copy fn macro {} => {}".format(macro_val, t.macro))
else:
if t.args == '':
val = self.eval_expr(macro_val)
self.add_def('macros', t.macro, macro_val)
self.add_def('values', t.macro, val)
mess = " Add macro: {} ({}); {}"
logger.debug(mess.format(t.macro, val,
self.defs['macros'][t.macro]))
else:
self.add_def('fnmacros', t.macro,
self.compile_fn_macro(macro_val,
[x for x in t.args]))
mess = " Add fn macro: {} ({}); {}"
logger.debug(mess.format(t.macro, t.args,
self.defs['fnmacros'][t.macro]))
return "#define " + t.macro + " " + macro_val
def compile_fn_macro(self, text, args):
"""Turn a function macro spec into a compiled description.
"""
# Find all instances of each arg in text.
args_str = '|'.join(args)
arg_regex = re.compile(r'("(\\"|[^"])*")|(\b({})\b)'.format(args_str))
start = 0
parts = []
arg_order = []
# The group number to check for argument names
N = 3
for m in arg_regex.finditer(text):
arg = m.groups()[N]
if arg is not None:
parts.append(text[start:m.start(N)] + '{}')
start = m.end(N)
arg_order.append(args.index(arg))
parts.append(text[start:])
return (''.join(parts), arg_order)
def expand_macros(self, line):
"""Expand all the macro expressions in a string.
Faulty calls to macro functions are left untouched.
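For example (illustrative), with a macro PI defined as 3.14, the line
'float x = PI;' expands to 'float x = 3.14;'.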
"""
reg = re.compile(r'("(\\"|[^"])*")|(\b(\w+)\b)')
parts = []
# The group number to check for macro names
N = 3
macros = self.defs['macros']
fnmacros = self.defs['fnmacros']
while True:
m = reg.search(line)
if not m:
break
name = m.groups()[N]
if name in macros:
parts.append(line[:m.start(N)])
line = line[m.end(N):]
parts.append(macros[name])
elif name in fnmacros:
# If function macro expansion fails, just ignore it.
try:
exp, end = self.expand_fn_macro(name, line[m.end(N):])
except Exception:
exp = name
end = 0
mess = "Function macro expansion failed: {}, {}"
logger.error(mess.format(name, line[m.end(N):]))
parts.append(line[:m.start(N)])
start = end + m.end(N)
line = line[start:]
parts.append(exp)
else:
start = m.end(N)
parts.append(line[:start])
line = line[start:]
parts.append(line)
return ''.join(parts)
def expand_fn_macro(self, name, text):
"""Replace a function macro.
"""
# defn looks like ('{} + {} / {}', [0, 0, 1])
defn = self.defs['fnmacros'][name]
arg_list = (stringStart + lparen +
Group(delimitedList(expression))('args') + rparen)
res = [x for x in arg_list.scanString(text, 1)]
if len(res) == 0:
mess = "Function macro '{}' not followed by (...)"
raise DefinitionError(0, mess.format(name))
args, start, end = res[0]
args = [self.expand_macros(arg) for arg in args[0]]
new_str = defn[0].format(*[args[i] for i in defn[1]])
return (new_str, end)
# --- Compilation functions
def parse_defs(self, path, return_unparsed=False):
"""Scan through the named file for variable, struct, enum, and function
declarations.
Parameters
----------
path : unicode
Path of the file to parse for definitions.
return_unparsed : bool, optional
If true, return a string of all lines that failed to match (for
debugging purposes).
Returns
-------
tokens : list
Entire tree of successfully parsed tokens.
"""
self.current_file = path
parser = self.build_parser()
if return_unparsed:
text = parser.suppress().transformString(self.files[path])
return re.sub(r'\n\s*\n', '\n', text)
else:
return [x[0] for x in parser.scanString(self.files[path])]
def build_parser(self):
"""Builds the entire tree of parser elements for the C language (the
bits we support, anyway).
"""
if hasattr(self, 'parser'):
return self.parser
self.struct_type = Forward()
self.enum_type = Forward()
type_ = (fund_type |
Optional(kwl(size_modifiers + sign_modifiers)) + ident |
self.struct_type |
self.enum_type)
if extra_modifier is not None:
type_ += extra_modifier
type_.setParseAction(recombine)
self.type_spec = (type_qualifier('pre_qual') +
type_("name"))
# --- Abstract declarators for use in function pointer arguments
# Thus begins the extremely hairy business of parsing C declarators.
# Whoever decided this was a reasonable syntax should probably never
# breed.
# The following parsers combined with the process_declarator function
# allow us to turn a nest of type modifiers into a correctly
# ordered list of modifiers.
self.declarator = Forward()
self.abstract_declarator = Forward()
# Abstract declarators look like:
# <empty string>
# *
# **[num]
# (*)(int, int)
# *( )(int, int)[10]
# ...etc...
self.abstract_declarator << Group(
type_qualifier('first_typequal') +
Group(ZeroOrMore(Group(Suppress('*') + type_qualifier)))('ptrs') +
((Optional('&')('ref')) |
(lparen + self.abstract_declarator + rparen)('center')) +
Optional(lparen +
Optional(delimitedList(Group(
self.type_spec('type') +
self.abstract_declarator('decl') +
Optional(Literal('=').suppress() + expression,
default=None)('val')
)), default=None) +
rparen)('args') +
Group(ZeroOrMore(lbrack + Optional(expression, default='-1') +
rbrack))('arrays')
)
# Declarators look like:
# varName
# *varName
# **varName[num]
# (*fnName)(int, int)
# * fnName(int arg1=0)[10]
# ...etc...
self.declarator << Group(
type_qualifier('first_typequal') + call_conv +
Group(ZeroOrMore(Group(Suppress('*') + type_qualifier)))('ptrs') +
((Optional('&')('ref') + ident('name')) |
(lparen + self.declarator + rparen)('center')) +
Optional(lparen +
Optional(delimitedList(
Group(self.type_spec('type') +
(self.declarator |
self.abstract_declarator)('decl') +
Optional(Literal('=').suppress() +
expression, default=None)('val')
)),
default=None) +
rparen)('args') +
Group(ZeroOrMore(lbrack + Optional(expression, default='-1') +
rbrack))('arrays')
)
self.declarator_list = Group(delimitedList(self.declarator))
# Typedef
self.type_decl = (Keyword('typedef') + self.type_spec('type') +
self.declarator_list('decl_list') + semi)
self.type_decl.setParseAction(self.process_typedef)
# Variable declaration
self.variable_decl = (
Group(storage_class_spec +
self.type_spec('type') +
Optional(self.declarator_list('decl_list')) +
Optional(Literal('=').suppress() +
(expression('value') |
(lbrace +
Group(delimitedList(expression))('array_values') +
rbrace
)
)
)
) +
semi)
self.variable_decl.setParseAction(self.process_variable)
# Function definition
self.typeless_function_decl = (self.declarator('decl') +
nestedExpr('{', '}').suppress())
self.function_decl = (storage_class_spec +
self.type_spec('type') +
self.declarator('decl') +
nestedExpr('{', '}').suppress())
self.function_decl.setParseAction(self.process_function)
# Struct definition
self.struct_decl = Forward()
struct_kw = (Keyword('struct') | Keyword('union'))
self.struct_member = (
Group(self.variable_decl.copy().setParseAction(lambda: None)) |
# Hack to handle bit width specification.
Group(Group(self.type_spec('type') +
Optional(self.declarator_list('decl_list')) +
colon + integer('bit') + semi)) |
(self.type_spec + self.declarator +
nestedExpr('{', '}')).suppress() |
(self.declarator + nestedExpr('{', '}')).suppress()
)
self.decl_list = (lbrace +
Group(OneOrMore(self.struct_member))('members') +
rbrace)
self.struct_type << (struct_kw('struct_type') +
((Optional(ident)('name') +
self.decl_list) | ident('name'))
)
self.struct_type.setParseAction(self.process_struct)
self.struct_decl = self.struct_type + semi
# Enum definition
enum_var_decl = Group(ident('name') +
Optional(Literal('=').suppress() +
(integer('value') | ident('valueName'))))
self.enum_type << (Keyword('enum') +
(Optional(ident)('name') +
lbrace +
Group(delimitedList(enum_var_decl))('members') +
Optional(comma) + rbrace | ident('name'))
)
self.enum_type.setParseAction(self.process_enum)
self.enum_decl = self.enum_type + semi
self.parser = (self.type_decl | self.variable_decl |
self.function_decl)
return self.parser
def process_declarator(self, decl):
"""Process a declarator (without base type) and return a tuple
(name, [modifiers])
See process_type(...) for more information.
"""
toks = []
quals = [tuple(decl.get('first_typequal', []))]
name = None
logger.debug("DECL: {}".format(decl))
if 'call_conv' in decl and len(decl['call_conv']) > 0:
toks.append(decl['call_conv'])
quals.append(None)
if 'ptrs' in decl and len(decl['ptrs']) > 0:
toks += ('*',) * len(decl['ptrs'])
quals += map(tuple, decl['ptrs'])
if 'arrays' in decl and len(decl['arrays']) > 0:
toks.extend([self.eval_expr(x)] for x in decl['arrays'])
quals += [()] * len(decl['arrays'])
if 'args' in decl and len(decl['args']) > 0:
if decl['args'][0] is None:
toks.append(())
else:
toks.append(tuple([self.process_type(a['type'],
a['decl']) +
(a['val'][0],) for a in decl['args']]
)
)
quals.append(())
if 'ref' in decl:
toks.append('&')
quals.append(())
if 'center' in decl:
(n, t, q) = self.process_declarator(decl['center'][0])
if n is not None:
name = n
toks.extend(t)
quals = quals[:-1] + [quals[-1] + q[0]] + list(q[1:])
if 'name' in decl:
name = decl['name']
return (name, toks, tuple(quals))
def process_type(self, typ, decl):
"""Take a declarator + base type and return a serialized name/type
description.
The description will be a list of elements (name, [basetype, modifier,
modifier, ...]):
- name is the string name of the declarator or None for an abstract
declarator
- basetype is the string representing the base type
- modifiers can be:
- '*' : pointer (multiple pointers "***" allowed)
- '&' : reference
- '__X' : calling convention (windows only). X can be 'cdecl' or
'stdcall'
- list : array. Value(s) indicate the length of each array, -1
for incomplete type.
- tuple : function, items are the output of process_type for each
function argument.
Examples:
- int *x[10] => ('x', ['int', [10], '*'])
- char fn(int x) => ('fn', ['char', [('x', ['int'])]])
- struct s (*)(int, int*) =>
(None, ["struct s", ((None, ['int']), (None, ['int', '*'])), '*'])
"""
logger.debug("PROCESS TYPE/DECL: {}/{}".format(typ['name'], decl))
(name, decl, quals) = self.process_declarator(decl)
pre_typequal = tuple(typ.get('pre_qual', []))
return (name, Type(typ['name'], *decl,
type_quals=(pre_typequal + quals[0],) + quals[1:]))
def process_enum(self, s, l, t):
"""
"""
try:
logger.debug("ENUM: {}".format(t))
if t.name == '':
n = 0
while True:
name = 'anon_enum{}'.format(n)
if name not in self.defs['enums']:
break
n += 1
else:
name = t.name[0]
logger.debug(" name: {}".format(name))
if name not in self.defs['enums']:
i = 0
enum = {}
for v in t.members:
if v.value != '':
i = literal_eval(v.value)
if v.valueName != '':
i = enum[v.valueName]
enum[v.name] = i
self.add_def('values', v.name, i)
i += 1
logger.debug(" members: {}".format(enum))
self.add_def('enums', name, enum)
self.add_def('types', 'enum '+name, Type('enum', name))
return ('enum ' + name)
except:
logger.exception("Error processing enum: {}".format(t))
def process_function(self, s, l, t):
"""Build a function definition from the parsing tokens.
"""
logger.debug("FUNCTION {} : {}".format(t, t.keys()))
try:
(name, decl) = self.process_type(t.type, t.decl[0])
if len(decl) == 0 or type(decl[-1]) != tuple:
logger.error('{}'.format(t))
mess = "Incorrect declarator type for function definition."
raise DefinitionError(mess)
logger.debug(" name: {}".format(name))
logger.debug(" sig: {}".format(decl))
self.add_def('functions', name, decl.add_compatibility_hack())
except Exception:
logger.exception("Error processing function: {}".format(t))
def packing_at(self, line):
"""Return the structure packing value at the given line number.
"""
packing = None
for p in self.pack_list[self.current_file]:
if p[0] <= line:
packing = p[1]
else:
break
return packing
def process_struct(self, s, l, t):
"""
"""
try:
str_typ = t.struct_type # struct or union
# Check for extra packing rules
packing = self.packing_at(lineno(l, s))
logger.debug('{} {} {}'.format(str_typ.upper(), t.name, t))
if t.name == '':
n = 0
while True:
sname = 'anon_{}{}'.format(str_typ, n)
if sname not in self.defs[str_typ+'s']:
break
n += 1
else:
if istext(t.name):
sname = t.name
else:
sname = t.name[0]
logger.debug(" NAME: {}".format(sname))
if (len(t.members) > 0 or sname not in self.defs[str_typ+'s'] or
self.defs[str_typ+'s'][sname] == {}):
logger.debug(" NEW " + str_typ.upper())
struct = []
for m in t.members:
typ = m[0].type
val = self.eval_expr(m[0].value)
logger.debug(" member: {}, {}, {}".format(
m, m[0].keys(), m[0].decl_list))
if len(m[0].decl_list) == 0: # anonymous member
member = [None, Type(typ[0]), None]
if m[0].bit:
member.append(int(m[0].bit))
struct.append(tuple(member))
for d in m[0].decl_list:
(name, decl) = self.process_type(typ, d)
member = [name, decl, val]
if m[0].bit:
member.append(int(m[0].bit))
struct.append(tuple(member))
logger.debug(" {} {} {} {}".format(name, decl,
val, m[0].bit))
str_cls = (Struct if str_typ == 'struct' else Union)
self.add_def(str_typ + 's', sname,
str_cls(*struct, pack=packing))
self.add_def('types', str_typ+' '+sname, Type(str_typ, sname))
return str_typ + ' ' + sname
except Exception:
logger.exception('Error processing struct: {}'.format(t))
def process_variable(self, s, l, t):
"""
"""
logger.debug("VARIABLE: {}".format(t))
try:
val = self.eval_expr(t[0])
for d in t[0].decl_list:
(name, typ) = self.process_type(t[0].type, d)
# This is a function prototype
if type(typ[-1]) is tuple:
logger.debug(" Add function prototype: {} {} {}".format(
name, typ, val))
self.add_def('functions', name,
typ.add_compatibility_hack())
# This is a variable
else:
logger.debug(" Add variable: {} {} {}".format(name,
typ, val))
self.add_def('variables', name, (val, typ))
self.add_def('values', name, val)
except Exception:
logger.exception('Error processing variable: {}'.format(t))
def process_typedef(self, s, l, t):
"""
"""
logger.debug("TYPE: {}".format(t))
typ = t.type
for d in t.decl_list:
(name, decl) = self.process_type(typ, d)
logger.debug(" {} {}".format(name, decl))
self.add_def('types', name, decl)
# --- Utility methods
def eval_expr(self, toks):
"""Evaluates expressions.
Currently only works for expressions that also happen to be valid
python expressions.
"""
logger.debug("Eval: {}".format(toks))
try:
if istext(toks) or isbytes(toks):
val = self.eval(toks, None, self.defs['values'])
elif toks.array_values != '':
val = [self.eval(x, None, self.defs['values'])
for x in toks.array_values]
elif toks.value != '':
val = self.eval(toks.value, None, self.defs['values'])
else:
val = None
return val
except Exception:
logger.debug(" failed eval {} : {}".format(toks, format_exc()))
return None
def eval(self, expr, *args):
"""Just eval with a little extra robustness."""
expr = expr.strip()
cast = (lparen + self.type_spec + self.abstract_declarator +
rparen).suppress()
expr = (quotedString | number | cast).transformString(expr)
if expr == '':
return None
return eval(expr, *args)
def add_def(self, typ, name, val):
"""Add a definition of a specific type to both the definition set for
the current file and the global definition set.
"""
self.defs[typ][name] = val
if self.current_file is None:
base_name = None
else:
base_name = os.path.basename(self.current_file)
if base_name not in self.file_defs:
self.file_defs[base_name] = {}
for k in self.data_list:
self.file_defs[base_name][k] = {}
self.file_defs[base_name][typ][name] = val
def rem_def(self, typ, name):
"""Remove a definition of a specific type to both the definition set
for the current file and the global definition set.
"""
if self.current_file is None:
base_name = None
else:
base_name = os.path.basename(self.current_file)
del self.defs[typ][name]
del self.file_defs[base_name][typ][name]
def is_fund_type(self, typ):
"""Return True if this type is a fundamental C type, struct, or
union.
**ATTENTION: This function is legacy and should be replaced by
Type.is_fund_type()**
"""
return Type(typ).is_fund_type()
def eval_type(self, typ):
"""Evaluate a named type into its fundamental type.
**ATTENTION: This function is legacy and should be replaced by
Type.eval()**
"""
if not isinstance(typ, Type):
typ = Type(*typ)
return typ.eval(self.defs['types'])
def find(self, name):
"""Search all definitions for the given name.
"""
res = []
for f in self.file_defs:
fd = self.file_defs[f]
for t in fd:
typ = fd[t]
for k in typ:
if istext(name):
if k == name:
res.append((f, t))
else:
if re.match(name, k):
res.append((f, t, k))
return res
def find_text(self, text):
"""Search all file strings for text, return matching lines.
"""
res = []
for f in self.files:
l = self.files[f].split('\n')
for i in range(len(l)):
if text in l[i]:
res.append((f, i, l[i]))
return res
# --- Basic parsing elements.
def kwl(strs):
"""Generate a match-first list of keywords given a list of strings."""
return Regex(r'\b({})\b'.format('|'.join(strs)))
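# For instance (illustrative), kwl(['int', 'long']) builds a Regex equivalent
# to r'\b(int|long)\b', matching the whole words 'int' or 'long' only.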
def flatten(lst):
res = []
for i in lst:
if isinstance(i, (list, tuple)):
res.extend(flatten(i))
else:
res.append(str(i))
return res
def recombine(tok):
"""Flattens a tree of tokens and joins into one big string.
"""
return " ".join(flatten(tok.asList()))
def print_parse_results(pr, depth=0, name=''):
"""For debugging; pretty-prints parse result objects.
"""
start = name + " " * (20 - len(name)) + ':' + '..' * depth
if isinstance(pr, ParseResults):
print(start)
for i in pr:
name = ''
for k in pr.keys():
if pr[k] is i:
name = k
break
print_parse_results(i, depth+1, name)
else:
print(start + str(pr))
# Syntactic delimiters
comma = Literal(",").ignore(quotedString).suppress()
colon = Literal(":").ignore(quotedString).suppress()
semi = Literal(";").ignore(quotedString).suppress()
lbrace = Literal("{").ignore(quotedString).suppress()
rbrace = Literal("}").ignore(quotedString).suppress()
lbrack = Literal("[").ignore(quotedString).suppress()
rbrack = Literal("]").ignore(quotedString).suppress()
lparen = Literal("(").ignore(quotedString).suppress()
rparen = Literal(")").ignore(quotedString).suppress()
# Numbers
int_strip = lambda t: t[0].rstrip('UL')
hexint = Regex(r'[+-]?\s*0[xX][{}]+[UL]*'.format(hexnums)).setParseAction(int_strip)
decint = Regex(r'[+-]?\s*[0-9]+[UL]*').setParseAction(int_strip)
integer = (hexint | decint)
# The floating regex is ugly, but that is because we do not want it to
# match integers.
floating = Regex(r'[+-]?\s*((((\d(\.\d*)?)|(\.\d+))[eE][+-]?\d+)|((\d\.\d*)|(\.\d+)))')
number = (floating | integer)
# Miscellaneous
bi_operator = oneOf("+ - / * | & || && ! ~ ^ % == != > < >= <= -> . :: << >> = ? :")
uni_right_operator = oneOf("++ --")
uni_left_operator = oneOf("++ -- - + * sizeof new")
wordchars = alphanums+'_$'
name = (WordStart(wordchars) + Word(alphas+"_", alphanums+"_$") +
WordEnd(wordchars))
size_modifiers = ['short', 'long']
sign_modifiers = ['signed', 'unsigned']
# Syntax elements defined by _init_cparser.
expression = Forward()
array_op = lbrack + expression + rbrack
base_types = None
ident = None
call_conv = None
type_qualifier = None
storage_class_spec = None
extra_modifier = None
fund_type = None
extra_type_list = []
num_types = ['int', 'float', 'double']
nonnum_types = ['char', 'bool', 'void']
# Define some common language elements when initialising.
def _init_cparser(extra_types=None, extra_modifiers=None):
global expression
global call_conv, ident
global base_types
global type_qualifier, storage_class_spec, extra_modifier
global fund_type
global extra_type_list
# Some basic definitions
extra_type_list = [] if extra_types is None else list(extra_types)
base_types = nonnum_types + num_types + extra_type_list
storage_classes = ['inline', 'static', 'extern']
qualifiers = ['const', 'volatile', 'restrict', 'near', 'far']
keywords = (['struct', 'enum', 'union', '__stdcall', '__cdecl'] +
qualifiers + base_types + size_modifiers + sign_modifiers)
keyword = kwl(keywords)
wordchars = alphanums+'_$'
ident = (WordStart(wordchars) + ~keyword +
Word(alphas + "_", alphanums + "_$") +
WordEnd(wordchars)).setParseAction(lambda t: t[0])
call_conv = Optional(Keyword('__cdecl') |
Keyword('__stdcall'))('call_conv')
# Removes '__name' from all type specs. May cause trouble.
underscore_2_ident = (WordStart(wordchars) + ~keyword + '__' +
Word(alphanums, alphanums+"_$") +
WordEnd(wordchars)).setParseAction(lambda t: t[0])
type_qualifier = ZeroOrMore((underscore_2_ident + Optional(nestedExpr())) |
kwl(qualifiers))
storage_class_spec = Optional(kwl(storage_classes))
if extra_modifiers:
extra_modifier = ZeroOrMore(kwl(extra_modifiers) +
Optional(nestedExpr())).suppress()
else:
extra_modifier = None
# Language elements
fund_type = OneOrMore(kwl(sign_modifiers + size_modifiers +
base_types)).setParseAction(lambda t: ' '.join(t))
# Is there a better way to process expressions with cast operators??
cast_atom = (
ZeroOrMore(uni_left_operator) + Optional('('+ident+')').suppress() +
((ident + '(' + Optional(delimitedList(expression)) + ')' |
ident + OneOrMore('[' + expression + ']') |
ident | number | quotedString
) |
('(' + expression + ')')) +
ZeroOrMore(uni_right_operator)
)
# XXX Added name here to catch macro functions on types
uncast_atom = (
ZeroOrMore(uni_left_operator) +
((ident + '(' + Optional(delimitedList(expression)) + ')' |
ident + OneOrMore('[' + expression + ']') |
ident | number | name | quotedString
) |
('(' + expression + ')')) +
ZeroOrMore(uni_right_operator)
)
atom = cast_atom | uncast_atom
expression << Group(atom + ZeroOrMore(bi_operator + atom))
expression.setParseAction(recombine)
| mrh1997/pyclibrary | pyclibrary/c_parser.py | Python | mit | 62,500 |
''' This file contains tests for the bar plot.
'''
import matplotlib.pyplot as plt
import pytest
import shap
from .utils import explainer # (pytest fixture do not remove) pylint: disable=unused-import
@pytest.mark.mpl_image_compare
def test_simple_bar(explainer): # pylint: disable=redefined-outer-name
""" Check that the bar plot is unchanged.
"""
shap_values = explainer(explainer.data)
fig = plt.figure()
shap.plots.bar(shap_values, show=False)
plt.tight_layout()
return fig
| slundberg/shap | tests/plots/test_bar.py | Python | mit | 509 |
# -*- coding: utf-8 -*-
from datetime import timedelta, datetime
import asyncio
import random
from .api import every, once_at, JobSchedule, default_schedule_manager
__all__ = ['every_day', 'every_week', 'every_monday', 'every_tuesday', 'every_wednesday',
'every_thursday', 'every_friday', 'every_saturday', 'every_sunday',
'once_at_next_monday', 'once_at_next_tuesday', 'once_at_next_wednesday',
'once_at_next_thursday', 'once_at_next_friday', 'once_at_next_saturday',
'once_at_next_sunday', 'every_random_interval']
def every_random_interval(job, interval: timedelta, loop=None):
"""
Executes the job once at a random time within each specified interval.
Examples:
run a job every day at a random time
run a job every hour at a random time
:param job: a callable (co-routine function) which returns
a co-routine or a future or an awaitable
:param interval: length of the scheduling interval, given as a
datetime.timedelta
:param loop: io loop if the provided job is a custom future linked up
with a different event loop.
:return: schedule object, so it could be cancelled at will of the user by
aschedule.cancel(schedule)
"""
if loop is None:
loop = asyncio.get_event_loop()
start = loop.time()
def wait_time_gen():
count = 0
while True:
rand = random.randrange(round(interval.total_seconds()))
tmp = round(start + interval.total_seconds() * count + rand - loop.time())
yield tmp
count += 1
schedule = JobSchedule(job, wait_time_gen(), loop=loop)
# add it to default_schedule_manager, so that user can aschedule.cancel it
default_schedule_manager.add_schedule(schedule)
return schedule
def every_day(job, loop=None):
return every(job, timedelta=timedelta(days=1), loop=loop)
def every_week(job, loop=None):
return every(job, timedelta=timedelta(days=7), loop=loop)
every_monday = lambda job, loop=None: _every_weekday(job, 0, loop=loop)
every_tuesday = lambda job, loop=None: _every_weekday(job, 1, loop=loop)
every_wednesday = lambda job, loop=None: _every_weekday(job, 2, loop=loop)
every_thursday = lambda job, loop=None: _every_weekday(job, 3, loop=loop)
every_friday = lambda job, loop=None: _every_weekday(job, 4, loop=loop)
every_saturday = lambda job, loop=None: _every_weekday(job, 5, loop=loop)
every_sunday = lambda job, loop=None: _every_weekday(job, 6, loop=loop)
once_at_next_monday = lambda job, loop=None: _once_at_weekday(job, 0, loop=loop)
once_at_next_tuesday = lambda job, loop=None: _once_at_weekday(job, 1, loop=loop)
once_at_next_wednesday = lambda job, loop=None: _once_at_weekday(job, 2, loop=loop)
once_at_next_thursday = lambda job, loop=None: _once_at_weekday(job, 3, loop=loop)
once_at_next_friday = lambda job, loop=None: _once_at_weekday(job, 4, loop=loop)
once_at_next_saturday = lambda job, loop=None: _once_at_weekday(job, 5, loop=loop)
once_at_next_sunday = lambda job, loop=None: _once_at_weekday(job, 6, loop=loop)
def _nearest_weekday(weekday):
return datetime.now() + timedelta(days=(weekday - datetime.now().weekday()) % 7)
def _every_weekday(job, weekday, loop=None):
return every(job, timedelta=timedelta(days=7), start_at=_nearest_weekday(weekday), loop=loop)
def _once_at_weekday(job, weekday, loop=None):
return once_at(job, _nearest_weekday(weekday), loop=loop)
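# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; the 'report' coroutine and the
# event-loop setup around it are assumptions, not part of this module):
#
# async def report():
#     print('tick')
#
# # run 'report' once at a random moment within every hour
# every_random_interval(report, timedelta(hours=1))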
| eightnoteight/aschedule | aschedule/ext.py | Python | mit | 3,564 |
# -*- coding: utf-8 -*-
"""
This module provides the necessary methods for a Certification Authority.
To create the self-signed certificate for the CA, use the following command:
$ openssl req -x509 -newkey rsa:2048 -keyout ca_priv.pem -out ca_cert.pem
@author: Vasco Santos
"""
import time
from M2Crypto import X509, RSA, EVP, BIO, ASN1
class CertificationAuthority(object):
""" Class responsible for keeping the CA self-signed certificate,
as well as its private key.
"""
def __init__(self, cert, priv_key, passphrase):
""" Create a Certification Authority Object.
Arguments:
cert: file system path of the CA's self-signed certificate.
priv_key: file system path of the CA's private key (encrypted).
passphrase: Symmetric key for priv_key decryption.
"""
def getPassphrase(*args):
""" Callback for private key decrypting.
"""
return str(passphrase.encode('utf-8'))
self.cert = X509.load_cert(cert.encode('utf-8'))
self.priv_key = RSA.load_key(priv_key.encode('utf-8'), getPassphrase)
# Private key for signing
self.signEVP = EVP.PKey()
self.signEVP.assign_rsa(self.priv_key)
def createSignedCertificate(self, subj_id, pub_key, expiration_time):
""" Create a certificate for a subject public key, signed by the CA.
Arguments:
subj_id: certificate subject identifier.
pub_key: public key of the subject.
expiration_time: certificate life time.
Returns:
Certificate in PEM Format.
"""
# Public Key to certificate
bio = BIO.MemoryBuffer(str(pub_key.decode('hex')))
pub_key = RSA.load_pub_key_bio(bio)
pkey = EVP.PKey()
pkey.assign_rsa(pub_key)
# Certificate Fields
cur_time = ASN1.ASN1_UTCTIME()
cur_time.set_time(int(time.time()))
expire_time = ASN1.ASN1_UTCTIME()
expire_time.set_time(int(time.time()) + expiration_time * 60)  # expiration_time is in minutes
# Certification Creation
cert = X509.X509()
cert.set_pubkey(pkey)
s_name = X509.X509_Name()
s_name.C = "PT"
s_name.CN = str(subj_id)
cert.set_subject(s_name)
i_name = X509.X509_Name()
i_name.C = "PT"
i_name.CN = "Register Server"
cert.set_issuer_name(i_name)
cert.set_not_before(cur_time)
cert.set_not_after(expire_time)
cert.sign(self.signEVP, md="sha1")
#cert.save_pem("peer_CA.pem")
return cert.as_pem().encode('hex')
def decryptData(self, data):
""" Decrypt the intended data with the entity private key.
Arguments:
data: data to be decrypted.
"""
return self.priv_key.private_decrypt(data.decode('base64'), RSA.pkcs1_padding)
def encryptData(self, data, certificate):
""" Encrypt the intended data with the public key contained in the certificate.
Arguments:
data: data to be encrypted.
certificate: subject certificate.
"""
cert = X509.load_cert_string(certificate.decode('hex'))
return cert.get_pubkey().get_rsa().public_encrypt(str(data), RSA.pkcs1_padding).encode('base64')
def getPublicKey(self):
""" Get the CA Public Key.
Returns:
CA Public Key in PEM Format.
"""
return self.cert.get_pubkey().get_rsa().as_pem().encode('hex')
def signData(self, data):
""" Sign a received String.
Arguments:
data: string to sign.
Returns:
signature of the received data.
"""
msgDigest = EVP.MessageDigest('sha1')
msgDigest.update(str(data))
self.signEVP.sign_init()
self.signEVP.sign_update(msgDigest.digest())
return self.signEVP.sign_final().encode('base64')
def signEncryptedData(self, cipherData):
""" Sign encrypted data.
Arguments:
cipherData: data encrypted (base64 format).
"""
msgDigest = EVP.MessageDigest('sha1')
msgDigest.update(cipherData.decode('base64'))
self.signEVP.sign_init()
self.signEVP.sign_update(msgDigest.digest())
return self.signEVP.sign_final().encode('base64')
def validCertificate(self, certificate):
""" Verify if a certificate of a subject was issued by this CA.
Arguments:
certificate: subject certificate.
Returns:
true if the certificate was issued by this CA. false otherwise.
"""
cert = X509.load_cert_string(certificate.decode('hex'))
# Data Analysis
# Subject confirmation
return cert.verify(self.cert.get_pubkey())
def validSelfSignedCertificate(self):
""" Verify if the self-signed CA certificate was not corrupted.
Returns:
true if the self signed certificate is valid, false otherwise.
"""
return self.cert.check_ca() and self.cert.verify(self.cert.get_pubkey())
def validSignedData(self, data, signature, certificate):
""" Verify if the received data was signed by the owner of the certificate.
Arguments:
data: received data.
signature: digital signature of the data.
certificate: certificate of the data issuer.
Returns:
true if the data maintains its integrity, false otherwise.
"""
msgDigest = EVP.MessageDigest('sha1')
msgDigest.update(str(data))
pub_key = X509.load_cert_string(certificate.decode('hex')).get_pubkey().get_rsa()
verifyEVP = EVP.PKey()
verifyEVP.assign_rsa(pub_key)
verifyEVP.verify_init()
verifyEVP.verify_update(msgDigest.digest())
return verifyEVP.verify_final(str(signature.decode('base64')))
def validSignedEncryptedData(self, cipherData, signature, certificate):
""" Verify if the received data was signed by the owner of the certificate.
Arguments:
cipherData: data encrypted (base64 format).
signature: digital signature of the data.
certificate: certificate of the data issuer.
Returns:
true if the data maintains its integrity, false otherwise.
"""
msgDigest = EVP.MessageDigest('sha1')
msgDigest.update(cipherData.decode('base64'))
pub_key = X509.load_cert_string(certificate.decode('hex')).get_pubkey().get_rsa()
verifyEVP = EVP.PKey()
verifyEVP.assign_rsa(pub_key)
verifyEVP.verify_init()
verifyEVP.verify_update(msgDigest.digest())
return verifyEVP.verify_final(str(signature.decode('base64')))
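# Hedged usage sketch (comments only, not part of the original module). It
# assumes an instance `ca` of this class has been constructed elsewhere with
# its CA certificate and keys loaded; `some_cert` stands for a hex-encoded
# certificate as issued by this CA. All names here are illustrative.
#
#   sig = ca.signData("hello")                    # base64-encoded signature
#   ca.validSignedData("hello", sig, some_cert)   # non-zero if it verifies
#   blob = ca.encryptData("secret", some_cert)    # RSA-encrypt, base64 result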
| vasco-santos/CertificationService | certModule/CertAuthority.py | Python | mit | 6,803 |
from baby_steps import given, then, when
from district42 import represent, schema
def test_list_of_representation():
with given:
sch = schema.list(schema.bool)
with when:
res = represent(sch)
with then:
assert res == "schema.list(schema.bool)"
def test_list_of_values_representation():
with given:
sch = schema.list(schema.int(1))
with when:
res = represent(sch)
with then:
assert res == "schema.list(schema.int(1))"
def test_list_of_repr_values_representation():
with given:
sch = schema.list(schema.str("banana"))
with when:
res = represent(sch)
with then:
assert res == "schema.list(schema.str('banana'))"
def test_list_of_len_representation():
with given:
sch = schema.list(schema.int).len(10)
with when:
res = represent(sch)
with then:
assert res == "schema.list(schema.int).len(10)"
def test_list_of_min_len_representation():
with given:
sch = schema.list(schema.int).len(1, ...)
with when:
res = represent(sch)
with then:
assert res == "schema.list(schema.int).len(1, ...)"
def test_list_of_max_len_representation():
with given:
sch = schema.list(schema.int).len(..., 10)
with when:
res = represent(sch)
with then:
assert res == "schema.list(schema.int).len(..., 10)"
def test_list_of_min_max_len_representation():
with given:
sch = schema.list(schema.int).len(1, 10)
with when:
res = represent(sch)
with then:
assert res == "schema.list(schema.int).len(1, 10)"
| nikitanovosibirsk/district42 | tests/list/test_list_of_representation.py | Python | mit | 1,650 |
import cx_Freeze
import sys
import os
executables = [cx_Freeze.Executable("MusicCompiler.py", base=None)]
cx_Freeze.setup(
name= "MusicCompiler",
description = "Best Program Ever Known To Humanity.",
author = "Space Sheep Enterprises",
options = {"build_exe":{"excludes":["urllib","html","http","tkinter","socket","multiprocessing","threading","email","htmllib"]}},
version = "1.0",
executables = executables
)
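# Standard cx_Freeze usage for a script like this (assuming it keeps its
# setup.py name): run `python setup.py build`, which places the frozen
# MusicCompiler executable under the build/ directory.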
| Jonathan-Z/PowerShellMusic | setup.py | Python | mit | 457 |
#!/Users/Varun/Documents/GitHub/LockScreen/venv/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| LockScreen/Backend | venv/bin/rst2xml.py | Python | mit | 642 |
from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
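# The credentials above must be stored in the system keyring once beforehand,
# e.g. (a sketch; keyring.set_password is the standard counterpart of the
# get_password calls used here, and the placeholder values are yours to fill):
#   import keyring
#   keyring.set_password('messagesReceiver', 'skypeUsername', '<username>')
#   keyring.set_password('messagesReceiver', 'skypePassword', '<password>')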
s = Skype(token, registrationToken)
if s.token is None:
s.login(username, password)
print "logging in..."
if s.registrationToken is None:
print s.createRegistrationToken()
print s.subcribe()
print "creating endpoint and registrationToken..."
while True:
data = s.pull()
if data == 404:
print s.createRegistrationToken()
print s.subcribe()
data = s.pull()
if data == 400:
continue
messages = utils.skypeParse(data)
if not messages:
continue
for sender, receiver, message in messages:
if receiver != None:
print "%s to %s" % (sender, receiver)
else:
print "From %s" % sender
print message
| khapota/messages-terminal | test.py | Python | mit | 1,064 |
#!/usr/bin/env python
import os
import sys # provides interaction with the Python interpreter
from functools import partial
from PyQt4 import QtGui # provides the graphic elements
from PyQt4.QtCore import Qt # provides Qt identifiers
from PyQt4.QtGui import QPushButton
try:
from sh import inxi
except:
print(" 'inxi' not found, install it to get this info")
try:
from sh import mhwd
except:
print(" 'mhwd' not found, this is not Manjaro?")
try:
from sh import hwinfo
except:
print(" 'hwinfo' not found")
try:
from sh import free
except:
print(" 'free' not found")
try:
from sh import lsblk
except:
print(" 'lsblk' not found")
try:
from sh import df
except:
print(" 'df' not found")
try:
from sh import blockdev
except:
print(" 'blockdev' not found")
try:
from sh import test
except:
print(" 'test' not found")
try:
from sh import parted
except:
print(" 'parted' not found")
TMP_FILE = "/tmp/mlogsout.txt"
HEADER = '''
===================
|{:^17}| {}
===================
'''
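# For example, HEADER.format("Inxi -Fxzc0", "Listing computer information")
# renders the first argument centered in a 17-character title row:
#
#   ===================
#   |   Inxi -Fxzc0   | Listing computer information
#   ===================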
checkbuttons = [
'Inxi',
'Installed g. drivers',
'List all g. drivers',
'Graphic Card Info',
'Memory Info',
'Partitions',
'Free Disk Space',
'Xorg.0',
'Xorg.1',
'pacman.log',
'journalctl - Emergency',
'journalctl - Alert',
'journalctl - Critical',
'journalctl - Failed',
'Open&Rc - rc.log',
]
def look_in_file(file_name, kws):
"""reads a file and returns only the lines that contain one of the keywords"""
with open(file_name) as f:
return "".join(filter(lambda line: any(kw in line for kw in kws), f))
class Window(QtGui.QWidget):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.checks = [False]*len(checkbuttons) # initialize all buttons to False
# creates a vertical box layout for the window
vlayout = QtGui.QVBoxLayout()
# creates the checkboxes
for idx, text in enumerate(checkbuttons):
checkbox = QtGui.QCheckBox(text)
# connects the 'stateChanged()' signal with the 'checkbox_state_changed()' slot
checkbox.stateChanged.connect(partial(self.checkbox_state_changed, idx))
vlayout.addWidget(checkbox) # adds the checkbox to the layout
btn = QPushButton("&Show Info ({})".format(TMP_FILE), self)
btn.clicked.connect(self.to_computer)
btn.clicked.connect(self.to_editor)
vlayout.addWidget(btn)
vlayout.addStretch()
self.setLayout(vlayout) # sets the window layout
def checkbox_state_changed(self, idx, state):
self.checks[idx] = state == Qt.Checked
def to_computer(self, text):
f = open(TMP_FILE, 'w') # write mode clears any previous content from the file if it exists
if self.checks[0]:
print("Saving: inxi to file")
f.write(HEADER.format("Inxi -Fxzc0", "Listing computer information"))
            try:
                f.write(str(inxi('-Fxxxzc0')))
            except:
                print(" 'inxi' not found, install it to get this info")
                f.write(" 'inxi' not found, install it to get this info")
            f.write('\n')
if self.checks[1]:
print("Getting info about installed graphical driver")
f.write(HEADER.format("Installed drivers", "Shows which graphic driver is installed"))
try:
f.write(str(mhwd('-li')))
except:
print(" 'mhwd' not found, this is not Manjaro?")
f.write('\n')
if self.checks[2]:
print("Getting list of all drivers supported on detected gpu's")
f.write(HEADER.format("Available drivers", "list of all drivers supported on detected gpu's"))
try:
f.write(str(mhwd('-l')))
except:
print(" 'mhwd' not found, this is not Manjaro?")
# f.write('\n')
if self.checks[3]:
print('hwinfo -graphic card')
# os.system('hwinfo --gfxcard')
f.write(HEADER.format("hwinfo --gfxcard", "Show Graphic Card info"))
try:
f.write(str(hwinfo('--gfxcard')))
except:
print('hwinfo graphic card info error')
f.write('hwinfo graphic card info error')
f.write('\n')
if self.checks[4]:
print('memory info')
# os.system('free -h')
f.write(HEADER.format("Memory Info", "Info about Memory and Swap"))
try:
f.write(str(free(' -h')))
except:
print('memory info error')
f.write('memory info error')
f.write('\n')
if self.checks[5]:
print('disk info')
# os.system('lsblk')
f.write(HEADER.format("Disk Info", "Disks and Partitions"))
try:
f.write(str(lsblk()))
except:
print('lsblk error')
f.write('lsblk error')
f.write('\n')
if self.checks[6]:
print('free disk space')
# os.system('df')
f.write(HEADER.format("Free Disk Space", "Free space per pertition"))
try:
f.write(str(df()))
except:
print('free disk space error')
f.write('free disk space error')
f.write('\n')
        if self.checks[7]:
print("Saving: Xorg.0.log to file")
f.write(HEADER.format("Xorg.0.log", "searching for: failed, error & (WW) keywords"))
try:
f.write(look_in_file('/var/log/Xorg.0.log', ['failed', 'error', '(WW)']))
except FileNotFoundError:
print("/var/log/Xorg.0.log not found!")
f.write("Xorg.0.log not found!")
f.write('\n')
        if self.checks[8]:
print("Saving: Xorg.1.log to file")
f.write(HEADER.format("Xorg.1.log", "searching for: failed, error & (WW) keywords"))
try:
f.write(look_in_file('/var/log/Xorg.1.log', ['failed', 'error', '(WW)']))
except FileNotFoundError:
print("/var/log/Xorg.1.log not found!")
f.write("Xorg.1.log not found!")
f.write('\n')
        if self.checks[9]:
print("Saving: pacman.log to file")
f.write(HEADER.format("pacman.log", "searching for: pacsave, pacnew, pacorig keywords"))
try:
f.write(look_in_file('/var/log/pacman.log', ['pacsave', 'pacnew', 'pacorig']))
except FileNotFoundError:
print("/var/log/pacman.log not found, this is not Manjaro or Arch based Linux?")
f.write("pacman.log not found! Not Arch based OS?")
f.write('\n')
        if self.checks[10]:
print("Saving: journalctl (emergency) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Emergency keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['emergency', 'Emergency', 'EMERGENCY']))
f.write('\n')
        if self.checks[11]:
print("Saving: journalctl (alert) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Alert keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['alert', 'Alert', 'ALERT']))
f.write('\n')
        if self.checks[12]:
print("Saving: journalctl (critical) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Critical keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['critical', 'Critical', 'CRITICAL']))
f.write('\n')
        if self.checks[13]:
print("Saving: journalctl (failed) to file")
os.system("journalctl -b > /tmp/journalctl.txt")
f.write(HEADER.format("journalctl.txt", "Searching for: Failed keywords"))
f.write(look_in_file('/tmp/journalctl.txt', ['failed', 'Failed', 'FAILED']))
f.write('\n')
        if self.checks[14]:
print("Saving: rc.log to file")
f.write(HEADER.format("rc.log", "OpenRc only! searching for: WARNING: keywords"))
try:
f.write(look_in_file('/var/log/rc.log', ['WARNING:']))
except FileNotFoundError:
print("/var/log/rc.log not found! Systemd based OS?")
f.write("rc.log not found! Systemd based OS?")
f.write('\n')
f.close()
def to_editor(self):
os.system("xdg-open "+TMP_FILE)
# creates the application and takes arguments from the command line
application = QtGui.QApplication(sys.argv)
# creates the window and sets its properties
window = Window()
window.setWindowTitle('Manjaro Logs') # title
window.resize(280, 50) # size
window.show() # shows the window
# runs the application and waits for its return value at the end
sys.exit(application.exec_())
| AlManja/logs.py | logsgui3.py | Python | mit | 9,203 |
a = [1,2,3,4,5]
b = [2,3,4,5,6]
to_100 = list(range(1, 101))
print ("Printing B")
for i in a:
print i
print ("Printing A")
for i in b:
print i
print ("Print 100 elements")
for i in to_100:
print i | davidvillaciscalderon/PythonLab | Session 4/bucles.py | Python | mit | 211 |
from ga_starters import * | Drob-AI/music-queue-rec | src/playlistsRecomender/gaPlaylistGenerator/__init__.py | Python | mit | 25 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
FeeFilterTest -- test processing of feefilter messages
'''
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
                return True
time.sleep(1)
    return False
# TestNode: bare-bones "peer". Used to track which invs are received from a node
# and to send the node feefilter messages.
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.txinvs = []
def on_inv(self, conn, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
def send_filter(self, feerate):
self.send_message(msg_feefilter(feerate))
self.sync_with_ping()
class FeeFilterTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
# Node1 will be used to generate txs which should be relayed from Node0
# to our test node
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
node0.generate(21)
sync_blocks(self.nodes)
# Setup the p2p connections and start up the network thread.
test_node = TestNode()
connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
test_node.add_connection(connection)
NetworkThread().start()
test_node.wait_for_verack()
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Set a filter of 15 sat/byte
test_node.send_filter(15000)
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Remove fee filter and check that txs are received again
test_node.send_filter(0)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| segwit/atbcoin-insight | qa/rpc-tests/p2p-feefilter.py | Python | mit | 4,317 |
""" TODO: Add docstring """
import re
import pexpect
class MediaObject(object):
"""Represents an encodable object"""
def __init__(self, input_filename, output_filename):
self.input_filename = input_filename
self.output_filename = output_filename
self.media_duration = self.get_media_duration()
# INFO: All other media information could potentially be put here too
def get_media_duration(self):
"""
Spawns an avprobe process to get the media duration.
Spawns an avprobe process and saves the output to a list, then uses
regex to find the duration of the media and return it as an integer.
"""
info_process = pexpect.spawn("/usr/bin/avprobe " + self.input_filename)
subprocess_output = info_process.readlines()
        info_process.close()
# Non-greedy match on characters 'Duration: ' followed by
# number in form 00:00:00:00
regex_group = re.compile(".*?Duration: .*?(\\d+):(\\d+):(\\d+).(\\d+)",
re.IGNORECASE | re.DOTALL)
# Exits as soon as duration is found
# PERF: Perform some tests to find the min number of lines
# certain not to contain the duration, then operate on a slice
# not containing those lines
for line in subprocess_output:
regex_match = regex_group.search(line)
if regex_match:
# Return the total duration in seconds
                return ((int(regex_match.group(1)) * 3600) +  # Hours
                        (int(regex_match.group(2)) * 60) +    # Minutes
                        int(regex_match.group(3)) +           # Seconds
                        # Round the fractional part (group 4) to the nearest second
                        1 if int(regex_match.group(4)) > 50 else 0)
# Not found so it's possible the process terminated early or an update
# broke the regex. Unlikely but we must return something just in case.
return -1
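# Minimal usage sketch (not part of the original module): the file names are
# placeholders, and avprobe must be on the PATH for the duration probe to work.
if __name__ == '__main__':
    demo = MediaObject("input.mp4", "output.mp4")
    print("Media duration: %d seconds" % demo.media_duration)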
| thethomaseffect/travers-media-tools | traversme/encoder/media_object.py | Python | mit | 2,000 |
import random
import musictheory
import filezart
import math
from pydub import AudioSegment
from pydub.playback import play
class Part:
def __init__(self, typ=None, intensity=0, size=0, gen=0, cho=0):
self._type = typ #"n1", "n2", "bg", "ch", "ge"
if intensity<0 or gen<0 or cho<0 or size<0 or intensity>1 or size>1 or gen>1 or cho>1:
raise ValueError ("Invalid Values for Structure Part")
self._intensity = intensity # [0-1]
self._size = size # [0-1]
self._genover = gen # [0-1] overlay of general type lines
self._chover = cho # [0-1] overlay of chorus type lines
def __repr__(self):
return "[" + self._type + "-" + str(self._intensity) + "-" + str(self._size) + "-" + str(self._genover) + "-" + str(self._chover) + "]"
@classmethod
def fromString(cls, string): # [n1-0.123-1-0.321-0.2] type, intensity, size, genoverlay, chooverlay
while string[0] == " ":
string = string[1:]
while string[0] == "\n":
string = string[1:]
while string[-1] == " ":
string = string[:-1]
while string[-1] == "\0":
string = string[:-1]
while string[-1] == "\n":
string = string[:-1]
if len(string)<8:
raise ValueError("Invalid Part string: "+string)
if string[0] == "[" and string[-1] == "]":
string = string[1:-1]
else:
raise ValueError("Invalid Part string: "+string)
typ = string[:2]
string = string[3:]
if not typ in ("n1", "n2", "bg", "ch", "ge"):
raise ValueError("Invalid Part Type string: "+typ)
valstrings = str.split(string, "-")
inten = eval(valstrings[0])
size = eval(valstrings[1])
gen = eval(valstrings[2])
cho = eval(valstrings[3])
return cls(typ, inten, size, gen, cho)
def getTheme(self, pal):
if self._type == "n1":
return pal._n1
if self._type == "n2":
return pal._n2
if self._type == "bg":
return pal._bg
if self._type == "ch":
return pal._ch
if self._type == "ge":
return pal._ge
def getAudio(self, pal, bpm):
base = self.baseDur(pal, bpm)
total = base + 3000 #extra time for last note to play
nvoic = math.ceil(self._intensity * self.getTheme(pal).countVoices())
try:
ngeno = math.ceil(self._genover * pal._ge.countVoices())
except:
ngeno = 0
try:
nchoo = math.ceil(self._chover * pal._ch.countVoices())
except:
nchoo = 0
sound = AudioSegment.silent(total)
them = self.getTheme(pal)
for i in range(nvoic):
voic = them._sorting[i].getVoice(them)
print(them._sorting[i].indicationStr(them)) #DEBUG !!
vsound = voic.partialAudio(self._size, bpm)
sound = sound.overlay(vsound)
them = pal._ge
for i in range(ngeno):
voic = them._sorting[i].getVoice(them)
print(them._sorting[i].indicationStr(them)) #DEBUG !!
vsound = voic.partialAudio(self._size, bpm)
sound = sound.overlay(vsound)
them = pal._ch
for i in range(nchoo):
voic = them._sorting[i].getVoice(them)
print(them._sorting[i].indicationStr(them)) #DEBUG !!
vsound = voic.partialAudio(self._size, bpm)
sound = sound.overlay(vsound)
return sound
def baseDur(self, pal, bpm): #get the base duration of this part of the song
return self.getTheme(pal).baseDurForStruct(self._size, bpm)
class Structure:
def __init__(self):
self._parts = ()
def add(self, part):
self._parts = self._parts+(part,)
def __repr__(self):
return "@STRUCTURE:" + str(self._parts)
def baseDur(self, pal, bpm=None):
if bpm == None:
bpm = pal._bpm
curTime = 0
for p in self._parts:
curTime = curTime + p.baseDur(pal, bpm)
return curTime
def songAudio(self, pal, bpm=None):
if bpm == None:
bpm = pal._bpm
total = self.baseDur(pal, bpm) + 3000 # 3 seconds for last note to play
sound = AudioSegment.silent(total)
curTime = 0
for p in self._parts:
paudio = p.getAudio(pal, bpm)
sound = sound.overlay(paudio, curTime)
curTime = curTime + p.baseDur(pal, bpm)
print("curTime:",curTime)
return sound
# wselect WeightedSelect returns element of dictionary based on dict weights {element:weight}
def wselect(dicti):
total=0
for i in list(dicti):
total = total + dicti[i]
indice = total*random.random()
for i in list(dicti):
if dicti[i]>=indice:
return i
indice = indice - dicti[i]
raise ValueError ("something went wrong")
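# Worked example: for wselect({"a": 1, "b": 3}) the total weight is 4, so a
# uniform draw over [0, 4) returns "a" with probability 1/4 and "b" with
# probability 3/4 -- each key's weight is tested against, then subtracted
# from, the running index until one key absorbs the draw.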
# rselect RandomSelect returns random element of list
def rselect(lista):
return random.choice(lista)
def lenweights():
return {3:1, 4:1, 5:2, 6:3, 7:4, 8:3, 9:2, 10:1, 11:1}
def stweights():
return {"n1":5, "n2":4, "ch":2, "bg":1}
def n1weights():
return {"n1":4, "n2":2, "ch":3, "bg":1}
def n2weights():
return {"n1":2, "n2":3, "ch":4, "bg":2}
def chweights():
return {"n1":2, "n2":1, "ch":4, "bg":1}
def bgweights():
return {"n1":1, "n2":1, "ch":20, "bg":8}
def typeSequence(size):
last = wselect(stweights())
sequence=(last,)
while len(sequence)<size:
if last == "n1":
last = wselect(n1weights())
elif last == "n2":
last = wselect(n2weights())
elif last == "ch":
last = wselect(chweights())
elif last == "bg":
last = wselect(bgweights())
sequence = sequence + (last,)
return sequence
def siweights():
return {0.1:1, 0.2:2, 0.3:4, 0.4:5, 0.5:5, 0.6:4, 0.7:3, 0.8:2, 0.9:1}
def deltaweights():
return {-0.3:1, -0.2:1, -0.1:1, 0:5, 0.1:3, 0.2:2, 0.3:2}
def intensitySequence(size):
val = wselect(siweights())
sequence = (val,)
while len(sequence)<size:
val = val + wselect(deltaweights())
if val<0.1:
val = 0.1
if val>1:
val = 1
sequence = sequence + (val,)
return sequence
def soweights():
return {0:6, 0.1:2, 0.2:1}
def deltoweights():
return {-0.2:1, -0.1:1, 0:8, 0.1:2, 0.2:2}
def overlaySequence(size):
val = wselect(soweights())
sequence = (val,)
while len(sequence)<size:
val = val + wselect(deltoweights())
if val<0.1:
val = 0.1
if val>1:
val = 1
sequence = sequence + (val,)
return sequence
def ssweights():
return {0.2:1, 0.4:1, 0.6:1, 0.8:1, 1:16}
def sizeSequence(size):
sequence = ()
while len(sequence)<size:
sequence = sequence + (wselect(ssweights()),)
return sequence
def makeStruct(size = None):
if size == None:
size = wselect(lenweights())
types = typeSequence(size)
inten = intensitySequence(size)
sizes = sizeSequence(size)
overl = overlaySequence(size)
return joinSeqs(types, inten, sizes, overl)
def joinSeqs(types, inten, sizes, overl):
struct = Structure()
for i in range(len(types)):
if types[i]=="bg":
string = "["+types[i]+"-"+str(inten[i])+"-"+str(sizes[i])+"-"+"0"+"-"+str(overl[i])+"]" # If its a bridge it has chord overlay
pt = Part.fromString(string)
struct.add(pt)
else:
string = "["+types[i]+"-"+str(inten[i])+"-"+str(sizes[i])+"-"+str(overl[i])+"-"+"0"+"]" # Else it has gen overlay
pt = Part.fromString(string)
struct.add(pt)
return struct
def pooptest():
for i in range(30):
print(makeStruct())
| joaoperfig/mikezart | source/markovzart2.py | Python | mit | 8,058 |
from . uuid64 import *
| jdowner/uuid64 | uuid64/__init__.py | Python | mit | 23 |
#!/usr/bin/env python
# T. Carman
# January 2017
import os
import json
def get_CMTs_in_file(aFile):
'''
Gets a list of the CMTs found in a file.
Parameters
----------
aFile : string, required
The path to a file to read.
Returns
-------
A list of CMTs found in a file.
'''
data = read_paramfile(aFile)
cmtkey_list = []
for line in data:
if line.find('CMT') >= 0:
sidx = line.find('CMT')
cmtkey_list.append(line[sidx:sidx+5])
return cmtkey_list
def find_cmt_start_idx(data, cmtkey):
'''
Finds the starting index for a CMT data block in a list of lines.
Parameters
----------
data : [str, str, ...]
A list of strings (maybe from a parameter file)
cmtkey : str
    A CMT code string like 'CMT05' to search for in the list.
Returns
-------
i : int
The first index in the list where the CMT key is found. If key is not found
returns None.
'''
for i, line in enumerate(data):
if cmtkey.upper() in line:
return i
# Key not found
return None
def read_paramfile(thefile):
'''
Opens and reads a file, returning the data as a list of lines (with newlines).
Parameters
----------
theFile : str
A path to a file to open and read.
Returns
-------
d : [str, str, str, ...]
A list of strings (with newlines at the end of each string).
'''
with open(thefile, 'r') as f:
data = f.readlines()
return data
def get_CMT_datablock(afile, cmtnum):
'''
Search file, returns the first block of data for one CMT as a list of strings.
Parameters
----------
afile : str
Path to a file to search.
cmtnum : int
The CMT number to search for. Converted (internally) to the CMT key.
Returns
-------
d : [str, str, ...]
A list of strings, one item for each line in the CMT's datablock.
Each string will have a newline charachter in it.
'''
data = read_paramfile(afile)
cmtkey = 'CMT%02i' % cmtnum
startidx = find_cmt_start_idx(data, cmtkey)
end = None
for i, line in enumerate(data[startidx:]):
if i == 0: # Header line, e.g.: "// CMT07 // Heath Tundra - (ma.....""
pass
elif i == 1: # PFT name line, e,g.: "//Decid. E.green ...."
# Not sure how/why this is working on non-PFT data blocks
# but is seems to do the trick?
pass
if (i > 0) and "CMT" in line:
#print "end of datablock, i=", i
end = startidx + i
break
return data[startidx:end]
def detect_block_with_pft_info(cmtdatablock):
# Perhaps should look at all lines??
secondline = cmtdatablock[1].strip("//").split()
if len(secondline) >= 9:
#print "Looks like a PFT header line!"
return True
else:
return False
def parse_header_line(datablock):
'''Splits a header line into components: cmtkey, text name, comment.
Assumes a CMT block header line looks like this:
// CMT07 // Heath Tundra - (ma.....
'''
# Assume header is first line
l0 = datablock[0]
# Header line, e.g:
header = l0.strip().strip("//").strip().split("//")
hdr_cmtkey = header[0].strip()
txtcmtname = header[1].strip().split('-')[0].strip()
hdrcomment = header[1].strip().split('-')[1].strip()
return hdr_cmtkey, txtcmtname, hdrcomment
def get_pft_verbose_name(cmtkey=None, pftkey=None, cmtnum=None, pftnum=None):
path2params = os.path.join(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'parameters/')
if cmtkey and cmtnum:
raise ValueError("you must provide only one of you cmtkey or cmtnumber")
if pftkey and pftnum:
raise ValueError("you must provide only one of pftkey or pftnumber")
if cmtkey: # convert to number
cmtnum = int(cmtkey.lstrip('CMT'))
if pftnum: # convert to key
pftkey = 'pft%i' % pftnum
data = get_CMT_datablock(os.path.join(path2params, 'cmt_calparbgc.txt'), cmtnum)
dd = cmtdatablock2dict(data)
return dd[pftkey.lower()]['name']
def cmtdatablock2dict(cmtdatablock):
'''
Converts a "CMT datablock" (list of strings) into a dict structure.
Parameters
----------
cmtdatablock : [str, str, ...]
A list of strings (with new lines) holding parameter data for a CMT.
Returns
-------
d : dict
A multi-level dict mapping names (deduced from comments) to parameter
    values.
'''
cmtdict = {}
pftblock = detect_block_with_pft_info(cmtdatablock)
hdr_cmtkey, txtcmtname, hdrcomment = parse_header_line(cmtdatablock)
cmtdict['tag'] = hdr_cmtkey
cmtdict['cmtname'] = txtcmtname
cmtdict['comment'] = hdrcomment
if pftblock:
# Look at the second line for something like this:
# PFT name line, like: "//Decid. E.green ...."
pftlist = cmtdatablock[1].strip("//").strip().split()
pftnames = pftlist[0:10]
for i, pftname in enumerate(pftnames):
cmtdict['pft%i'%i] = {}
cmtdict['pft%i'%i]['name'] = pftname
for i, line in enumerate(cmtdatablock):
if line.strip()[0:2] == "//":
#print "passing line", i
continue # Nothing to do...commented line
else: # normal data line
dline = line.strip().split("//")
values = dline[0].split()
comment = dline[1].strip().strip("//").split(':')[0]
if len(values) >= 5: # <--ARBITRARY! likely a pft data line?
for i, value in enumerate(values):
cmtdict['pft%i'%i][comment] = float(value)
else:
cmtdict[comment] = float(values[0])
return cmtdict
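# Sketch of the returned structure (the actual keys depend on the comments in
# the parameter file; the values below are made up for illustration):
#
#   {'tag': 'CMT02', 'cmtname': '...', 'comment': '...',
#    'pft0': {'name': 'Decid', 'cmax': 13.45, ...},
#    'pft1': {...},
#    'some_non_pft_param': 0.5}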
def format_CMTdatadict(dd, refFile, format=None):
'''
Returns a formatted block of CMT data.
Parameters
----------
dd : dict
Dictionary containing parameter names and values for a CMT.
refFile : str
A path to a file that should be used for reference in formatting the output.
format : str (optional)
A string specifying which format to return. Defaults to None.
Returns
-------
d : [str, str, ...]
A list of strings
'''
if format is not None:
print "NOT IMPLEMENTED YET!"
exit(-1)
ref_order = generate_reference_order(refFile)
dwpftvs = False
ll = []
ll.append("// First line comment...")
ll.append("// Second line comment (?? PFT string?)")
def is_pft_var(v):
if v not in dd.keys() and v in dd['pft0'].keys():
return True
else:
return False
for var in ref_order:
if not is_pft_var(var):
pass
else:
# get each item from dict, append to line
linestring = ''
for pft in get_datablock_pftkeys(dd):
linestring += "{:>12.6f} ".format(dd[pft][var])
linestring += ('// %s: ' % var)
ll.append(linestring)
for var in ref_order:
if is_pft_var(var):
pass # Nothing to do; already did pft stuff
else:
# get item from dict, append to line
ll.append('{:<12.5f} // comment??'.format(dd[var]))
return ll
def generate_reference_order(aFile):
'''
Lists order that variables should be in in a parameter file based on CMT 0.
Parameters
----------
aFile: str
The file to use as a base.
Returns
-------
l : [str, str, ...]
A list of strings containing the variable names, parsed from the input file
in the order they appear in the input file.
'''
cmt_calparbgc = []
db = get_CMT_datablock(aFile, 0)
pftblock = detect_block_with_pft_info(db)
ref_order = []
for line in db:
t = comment_splitter(line)
if t[0] == '':
pass # nothing before the comment, ignore this line - is has no data
else:
# looks like t0 has some data, so now we need the
# comment (t[1]) which we will further refine to get the
# tag, which we will append to the "reference order" list
tokens = t[1].strip().lstrip("//").strip().split(":")
tag = tokens[0]
desc = "".join(tokens[1:])
print "Found tag:", tag, " Desc: ", desc
ref_order.append(tag)
return ref_order
def comment_splitter(line):
'''
Splits a string into data before comment and after comment.
The comment delimiter ('//') will be included in the after component.
Parameters
----------
line : str
A string representing the line of data. May or may not contain the comment
delimiter.
Returns
-------
t : (str, str) - Tuple of strings.
A tuple containing the "before comment" string, and the "after comment"
string. The "after commnet" string will include the comment charachter.
'''
cmtidx = line.find("//")
if cmtidx < 0:
return (line, '')
else:
return (line[0:cmtidx], line[cmtidx:])
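# For example:
#   comment_splitter("0.25   // cmax: max carbon")  ->  ("0.25   ", "// cmax: max carbon")
#   comment_splitter("no comment here")             ->  ("no comment here", '')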
def get_datablock_pftkeys(dd):
'''
Returns a sorted list of the pft keys present in a CMT data dictionary.
Parameters
----------
dd : dict
A CMT data dictionary (as might be created from cmtdatablock2dict(..)).
Returns
-------
A sorted list of the keys present in dd that contain the string 'pft'.
'''
return sorted([i for i in dd.keys() if 'pft' in i])
def enforce_initvegc_split(aFile, cmtnum):
'''
Makes sure that the 'cpart' compartments variables match the proportions
set in initvegc variables in a cmt_bgcvegetation.txt file.
The initvegc(leaf, wood, root) variables in cmt_bgcvegetation.txt are the
measured values from literature. The cpar(leaf, wood, root) variables, which
are in the same file, should be set to the fractional make up of the the
components. So if the initvegc values for l, w, r are 100, 200, 300, then the
cpart values should be 0.166, 0.33, and 0.5. It is very easy for these values
to get out of sync when users manually update the parameter file.
Parameters
----------
aFile : str
Path to a parameter file to work on. Must have bgcvegetation.txt in the name
and must be a 'bgcvegetation' parameter file for this function to make sense
and work.
cmtnum : int
The community number in the file to work on.
Returns
-------
d : dict
A CMT data dictionary with the updated cpart values.
'''
if ('bgcvegetation.txt' not in aFile):
raise ValueError("This function only makes sense on cmt_bgcvegetation.txt files.")
d = get_CMT_datablock(aFile, cmtnum)
dd = cmtdatablock2dict(d)
for pft in get_datablock_pftkeys(dd):
sumC = dd[pft]['initvegcl'] + dd[pft]['initvegcw'] + dd[pft]['initvegcr']
if sumC > 0.0:
dd[pft]['cpartl'] = dd[pft]['initvegcl'] / sumC
dd[pft]['cpartw'] = dd[pft]['initvegcw'] / sumC
dd[pft]['cpartr'] = dd[pft]['initvegcr'] / sumC
else:
dd[pft]['cpartl'] = 0.0
dd[pft]['cpartw'] = 0.0
dd[pft]['cpartr'] = 0.0
return dd
if __name__ == '__main__':
print "NOTE! Does not work correctly on non-PFT files yet!!"
testFiles = [
'parameters/cmt_calparbgc.txt',
'parameters/cmt_bgcsoil.txt',
'parameters/cmt_bgcvegetation.txt',
'parameters/cmt_calparbgc.txt.backupsomeparams',
'parameters/cmt_dimground.txt',
'parameters/cmt_dimvegetation.txt',
'parameters/cmt_envcanopy.txt',
'parameters/cmt_envground.txt',
'parameters/cmt_firepar.txt'
]
for i in testFiles:
print "{:>45s}: {}".format(i, get_CMTs_in_file(i))
# for i in testFiles:
# print "{:>45s}".format(i)
# print "".join(get_CMT_datablock(i, 2))
# print "{:45s}".format("DONE")
d = get_CMT_datablock(testFiles[4], 2)
print "".join(d)
print json.dumps(cmtdatablock2dict(d), sort_keys=True, indent=2)
print "NOTE! Does not work correctly on non-PFT files yet!!"
| tobeycarman/dvm-dos-tem | scripts/param_util.py | Python | mit | 11,387 |
# -*- test-case-name: twisted.test.test_task,twisted.test.test_cooperator -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Scheduling utility methods and classes.
@author: Jp Calderone
"""
__metaclass__ = type
import time
from zope.interface import implements
from twisted.python import reflect
from twisted.python.failure import Failure
from twisted.internet import base, defer
from twisted.internet.interfaces import IReactorTime
class LoopingCall:
"""Call a function repeatedly.
If C{f} returns a deferred, rescheduling will not take place until the
deferred has fired. The result value is ignored.
@ivar f: The function to call.
@ivar a: A tuple of arguments to pass the function.
@ivar kw: A dictionary of keyword arguments to pass to the function.
@ivar clock: A provider of
L{twisted.internet.interfaces.IReactorTime}. The default is
L{twisted.internet.reactor}. Feel free to set this to
something else, but it probably ought to be set *before*
calling L{start}.
@type running: C{bool}
@ivar running: A flag which is C{True} while C{f} is scheduled to be called
(or is currently being called). It is set to C{True} when L{start} is
called and set to C{False} when L{stop} is called or if C{f} raises an
exception. In either case, it will be C{False} by the time the
C{Deferred} returned by L{start} fires its callback or errback.
@type _expectNextCallAt: C{float}
@ivar _expectNextCallAt: The time at which this instance most recently
scheduled itself to run.
@type _realLastTime: C{float}
@ivar _realLastTime: When counting skips, the time at which the skip
counter was last invoked.
@type _runAtStart: C{bool}
@ivar _runAtStart: A flag indicating whether the 'now' argument was passed
to L{LoopingCall.start}.
"""
call = None
running = False
deferred = None
interval = None
_expectNextCallAt = 0.0
_runAtStart = False
starttime = None
def __init__(self, f, *a, **kw):
self.f = f
self.a = a
self.kw = kw
from twisted.internet import reactor
self.clock = reactor
def withCount(cls, countCallable):
"""
An alternate constructor for L{LoopingCall} that makes available the
number of calls which should have occurred since it was last invoked.
Note that this number is an C{int} value; It represents the discrete
number of calls that should have been made. For example, if you are
using a looping call to display an animation with discrete frames, this
number would be the number of frames to advance.
The count is normally 1, but can be higher. For example, if the reactor
is blocked and takes too long to invoke the L{LoopingCall}, a Deferred
returned from a previous call is not fired before an interval has
elapsed, or if the callable itself blocks for longer than an interval,
preventing I{itself} from being called.
@param countCallable: A callable that will be invoked each time the
resulting LoopingCall is run, with an integer specifying the number
of calls that should have been invoked.
@type countCallable: 1-argument callable which takes an C{int}
@return: An instance of L{LoopingCall} with call counting enabled,
which provides the count as the first positional argument.
@rtype: L{LoopingCall}
@since: 9.0
"""
def counter():
now = self.clock.seconds()
lastTime = self._realLastTime
if lastTime is None:
lastTime = self.starttime
if self._runAtStart:
lastTime -= self.interval
self._realLastTime = now
lastInterval = self._intervalOf(lastTime)
thisInterval = self._intervalOf(now)
count = thisInterval - lastInterval
return countCallable(count)
self = cls(counter)
self._realLastTime = None
return self
withCount = classmethod(withCount)
def _intervalOf(self, t):
"""
Determine the number of intervals passed as of the given point in
time.
@param t: The specified time (from the start of the L{LoopingCall}) to
be measured in intervals
@return: The C{int} number of intervals which have passed as of the
given point in time.
"""
elapsedTime = t - self.starttime
intervalNum = int(elapsedTime / self.interval)
return intervalNum
def start(self, interval, now=True):
"""
Start running function every interval seconds.
@param interval: The number of seconds between calls. May be
less than one. Precision will depend on the underlying
platform, the available hardware, and the load on the system.
@param now: If True, run this call right now. Otherwise, wait
until the interval has elapsed before beginning.
@return: A Deferred whose callback will be invoked with
C{self} when C{self.stop} is called, or whose errback will be
invoked when the function raises an exception or returned a
deferred that has its errback invoked.
"""
assert not self.running, ("Tried to start an already running "
"LoopingCall.")
if interval < 0:
raise ValueError, "interval must be >= 0"
self.running = True
d = self.deferred = defer.Deferred()
self.starttime = self.clock.seconds()
self._expectNextCallAt = self.starttime
self.interval = interval
self._runAtStart = now
if now:
self()
else:
self._reschedule()
return d
def stop(self):
"""Stop running function.
"""
assert self.running, ("Tried to stop a LoopingCall that was "
"not running.")
self.running = False
if self.call is not None:
self.call.cancel()
self.call = None
d, self.deferred = self.deferred, None
d.callback(self)
def reset(self):
"""
Skip the next iteration and reset the timer.
@since: 11.1
"""
assert self.running, ("Tried to reset a LoopingCall that was "
"not running.")
if self.call is not None:
self.call.cancel()
self.call = None
self._expectNextCallAt = self.clock.seconds()
self._reschedule()
def __call__(self):
def cb(result):
if self.running:
self._reschedule()
else:
d, self.deferred = self.deferred, None
d.callback(self)
def eb(failure):
self.running = False
d, self.deferred = self.deferred, None
d.errback(failure)
self.call = None
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
d.addCallback(cb)
d.addErrback(eb)
def _reschedule(self):
"""
Schedule the next iteration of this looping call.
"""
if self.interval == 0:
self.call = self.clock.callLater(0, self)
return
currentTime = self.clock.seconds()
# Find how long is left until the interval comes around again.
untilNextTime = (self._expectNextCallAt - currentTime) % self.interval
# Make sure it is in the future, in case more than one interval worth
# of time passed since the previous call was made.
nextTime = max(
self._expectNextCallAt + self.interval, currentTime + untilNextTime)
# If the interval falls on the current time exactly, skip it and
# schedule the call for the next interval.
if nextTime == currentTime:
nextTime += self.interval
self._expectNextCallAt = nextTime
self.call = self.clock.callLater(nextTime - currentTime, self)
def __repr__(self):
if hasattr(self.f, 'func_name'):
func = self.f.func_name
if hasattr(self.f, 'im_class'):
func = self.f.im_class.__name__ + '.' + func
else:
func = reflect.safe_repr(self.f)
return 'LoopingCall<%r>(%s, *%s, **%s)' % (
self.interval, func, reflect.safe_repr(self.a),
reflect.safe_repr(self.kw))
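# A usage sketch (not part of Twisted itself): invoke a function every
# 2.5 seconds, starting immediately, and stop the loop 10 seconds later.
#
#     from twisted.internet import reactor
#     from twisted.internet.task import LoopingCall
#
#     def beat():
#         print 'beat'
#
#     lc = LoopingCall(beat)
#     d = lc.start(2.5, now=True)   # d fires with lc once stop() is called
#     reactor.callLater(10, lc.stop)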
class SchedulerError(Exception):
"""
The operation could not be completed because the scheduler or one of its
tasks was in an invalid state. This exception should not be raised
directly, but is a superclass of various scheduler-state-related
exceptions.
"""
class SchedulerStopped(SchedulerError):
"""
The operation could not complete because the scheduler was stopped in
progress or was already stopped.
"""
class TaskFinished(SchedulerError):
"""
The operation could not complete because the task was already completed,
stopped, encountered an error or otherwise permanently stopped running.
"""
class TaskDone(TaskFinished):
"""
The operation could not complete because the task was already completed.
"""
class TaskStopped(TaskFinished):
"""
The operation could not complete because the task was stopped.
"""
class TaskFailed(TaskFinished):
"""
The operation could not complete because the task died with an unhandled
error.
"""
class NotPaused(SchedulerError):
"""
This exception is raised when a task is resumed which was not previously
paused.
"""
class _Timer(object):
MAX_SLICE = 0.01
def __init__(self):
self.end = time.time() + self.MAX_SLICE
def __call__(self):
return time.time() >= self.end
_EPSILON = 0.00000001
def _defaultScheduler(x):
from twisted.internet import reactor
return reactor.callLater(_EPSILON, x)
class CooperativeTask(object):
"""
A L{CooperativeTask} is a task object inside a L{Cooperator}, which can be
paused, resumed, and stopped. It can also have its completion (or
termination) monitored.
@see: L{CooperativeTask.cooperate}
@ivar _iterator: the iterator to iterate when this L{CooperativeTask} is
asked to do work.
@ivar _cooperator: the L{Cooperator} that this L{CooperativeTask}
participates in, which is used to re-insert it upon resume.
@ivar _deferreds: the list of L{defer.Deferred}s to fire when this task
completes, fails, or finishes.
@type _deferreds: L{list}
@type _cooperator: L{Cooperator}
@ivar _pauseCount: the number of times that this L{CooperativeTask} has
been paused; if 0, it is running.
@type _pauseCount: L{int}
@ivar _completionState: The completion-state of this L{CooperativeTask}.
C{None} if the task is not yet completed, an instance of L{TaskStopped}
if C{stop} was called to stop this task early, of L{TaskFailed} if the
application code in the iterator raised an exception which caused it to
terminate, and of L{TaskDone} if it terminated normally via raising
L{StopIteration}.
@type _completionState: L{TaskFinished}
"""
def __init__(self, iterator, cooperator):
"""
A private constructor: to create a new L{CooperativeTask}, see
L{Cooperator.cooperate}.
"""
self._iterator = iterator
self._cooperator = cooperator
self._deferreds = []
self._pauseCount = 0
self._completionState = None
self._completionResult = None
cooperator._addTask(self)
def whenDone(self):
"""
Get a L{defer.Deferred} notification of when this task is complete.
@return: a L{defer.Deferred} that fires with the C{iterator} that this
L{CooperativeTask} was created with when the iterator has been
exhausted (i.e. its C{next} method has raised L{StopIteration}), or
fails with the exception raised by C{next} if it raises some other
exception.
@rtype: L{defer.Deferred}
"""
d = defer.Deferred()
if self._completionState is None:
self._deferreds.append(d)
else:
d.callback(self._completionResult)
return d
def pause(self):
"""
Pause this L{CooperativeTask}. Stop doing work until
L{CooperativeTask.resume} is called. If C{pause} is called more than
once, C{resume} must be called an equal number of times to resume this
task.
@raise TaskFinished: if this task has already finished or completed.
"""
self._checkFinish()
self._pauseCount += 1
if self._pauseCount == 1:
self._cooperator._removeTask(self)
def resume(self):
"""
Resume processing of a paused L{CooperativeTask}.
@raise NotPaused: if this L{CooperativeTask} is not paused.
"""
if self._pauseCount == 0:
raise NotPaused()
self._pauseCount -= 1
if self._pauseCount == 0 and self._completionState is None:
self._cooperator._addTask(self)
def _completeWith(self, completionState, deferredResult):
"""
@param completionState: a L{TaskFinished} exception or a subclass
thereof, indicating what exception should be raised when subsequent
operations are performed.
@param deferredResult: the result to fire all the deferreds with.
"""
self._completionState = completionState
self._completionResult = deferredResult
if not self._pauseCount:
self._cooperator._removeTask(self)
# The Deferreds need to be invoked after all this is completed, because
# a Deferred may want to manipulate other tasks in a Cooperator. For
# example, if you call "stop()" on a cooperator in a callback on a
# Deferred returned from whenDone(), this CooperativeTask must be gone
# from the Cooperator by that point so that _completeWith is not
# invoked reentrantly; that would cause these Deferreds to blow up with
# an AlreadyCalledError, or the _removeTask to fail with a ValueError.
for d in self._deferreds:
d.callback(deferredResult)
def stop(self):
"""
Stop further processing of this task.
@raise TaskFinished: if this L{CooperativeTask} has previously
completed, via C{stop}, completion, or failure.
"""
self._checkFinish()
self._completeWith(TaskStopped(), Failure(TaskStopped()))
def _checkFinish(self):
"""
If this task has been stopped, raise the appropriate subclass of
L{TaskFinished}.
"""
if self._completionState is not None:
raise self._completionState
def _oneWorkUnit(self):
"""
Perform one unit of work for this task, retrieving one item from its
iterator, stopping if there are no further items in the iterator, and
pausing if the result was a L{defer.Deferred}.
"""
try:
result = self._iterator.next()
except StopIteration:
self._completeWith(TaskDone(), self._iterator)
except:
self._completeWith(TaskFailed(), Failure())
else:
if isinstance(result, defer.Deferred):
self.pause()
def failLater(f):
self._completeWith(TaskFailed(), f)
result.addCallbacks(lambda result: self.resume(),
failLater)
class Cooperator(object):
"""
Cooperative task scheduler.
"""
def __init__(self,
terminationPredicateFactory=_Timer,
scheduler=_defaultScheduler,
started=True):
"""
Create a scheduler-like object to which iterators may be added.
@param terminationPredicateFactory: A no-argument callable which will
be invoked at the beginning of each step and should return a
no-argument callable which will return True when the step should be
terminated. The default factory is time-based and allows iterators to
run for 1/100th of a second at a time.
@param scheduler: A one-argument callable which takes a no-argument
callable and should invoke it at some future point. This will be used
to schedule each step of this Cooperator.
@param started: A boolean which indicates whether iterators should be
stepped as soon as they are added, or if they will be queued up until
L{Cooperator.start} is called.
"""
self._tasks = []
self._metarator = iter(())
self._terminationPredicateFactory = terminationPredicateFactory
self._scheduler = scheduler
self._delayedCall = None
self._stopped = False
self._started = started
def coiterate(self, iterator, doneDeferred=None):
"""
Add an iterator to the list of iterators this L{Cooperator} is
currently running.
@param doneDeferred: If specified, this will be the Deferred used as
the completion deferred. It is suggested that you use the default,
which creates a new Deferred for you.
@return: a Deferred that will fire when the iterator finishes.
"""
if doneDeferred is None:
doneDeferred = defer.Deferred()
CooperativeTask(iterator, self).whenDone().chainDeferred(doneDeferred)
return doneDeferred
def cooperate(self, iterator):
"""
Start running the given iterator as a long-running cooperative task, by
calling next() on it as a periodic timed event.
@param iterator: the iterator to invoke.
@return: a L{CooperativeTask} object representing this task.
"""
return CooperativeTask(iterator, self)
def _addTask(self, task):
"""
Add a L{CooperativeTask} object to this L{Cooperator}.
"""
if self._stopped:
self._tasks.append(task) # XXX silly, I know, but _completeWith
# does the inverse
task._completeWith(SchedulerStopped(), Failure(SchedulerStopped()))
else:
self._tasks.append(task)
self._reschedule()
def _removeTask(self, task):
"""
Remove a L{CooperativeTask} from this L{Cooperator}.
"""
self._tasks.remove(task)
# If no work left to do, cancel the delayed call:
if not self._tasks and self._delayedCall:
self._delayedCall.cancel()
self._delayedCall = None
def _tasksWhileNotStopped(self):
"""
Yield all L{CooperativeTask} objects in a loop as long as this
L{Cooperator}'s termination condition has not been met.
"""
terminator = self._terminationPredicateFactory()
while self._tasks:
for t in self._metarator:
yield t
if terminator():
return
self._metarator = iter(self._tasks)
def _tick(self):
"""
Run one scheduler tick.
"""
self._delayedCall = None
for taskObj in self._tasksWhileNotStopped():
taskObj._oneWorkUnit()
self._reschedule()
_mustScheduleOnStart = False
def _reschedule(self):
if not self._started:
self._mustScheduleOnStart = True
return
if self._delayedCall is None and self._tasks:
self._delayedCall = self._scheduler(self._tick)
def start(self):
"""
Begin scheduling steps.
"""
self._stopped = False
self._started = True
if self._mustScheduleOnStart:
del self._mustScheduleOnStart
self._reschedule()
def stop(self):
"""
Stop scheduling steps. Errback the completion Deferreds of all
iterators which have been added and forget about them.
"""
self._stopped = True
for taskObj in self._tasks:
taskObj._completeWith(SchedulerStopped(),
Failure(SchedulerStopped()))
self._tasks = []
if self._delayedCall is not None:
self._delayedCall.cancel()
self._delayedCall = None
_theCooperator = Cooperator()
def coiterate(iterator):
"""
Cooperatively iterate over the given iterator, dividing runtime between it
and all other iterators which have been passed to this function and not yet
exhausted.
"""
return _theCooperator.coiterate(iterator)
def cooperate(iterator):
"""
Start running the given iterator as a long-running cooperative task, by
calling next() on it as a periodic timed event.
@param iterator: the iterator to invoke.
@return: a L{CooperativeTask} object representing this task.
"""
return _theCooperator.cooperate(iterator)
class Clock:
"""
Provide a deterministic, easily-controlled implementation of
L{IReactorTime.callLater}. This is commonly useful for writing
deterministic unit tests for code which schedules events using this API.
"""
implements(IReactorTime)
rightNow = 0.0
def __init__(self):
self.calls = []
def seconds(self):
"""
Pretend to be time.time(). This is used internally when an operation
        such as L{IDelayedCall.reset} needs to determine a time value
relative to the current time.
@rtype: C{float}
@return: The time which should be considered the current time.
"""
return self.rightNow
def _sortCalls(self):
"""
Sort the pending calls according to the time they are scheduled.
"""
self.calls.sort(lambda a, b: cmp(a.getTime(), b.getTime()))
def callLater(self, when, what, *a, **kw):
"""
See L{twisted.internet.interfaces.IReactorTime.callLater}.
"""
dc = base.DelayedCall(self.seconds() + when,
what, a, kw,
self.calls.remove,
lambda c: None,
self.seconds)
self.calls.append(dc)
self._sortCalls()
return dc
def getDelayedCalls(self):
"""
See L{twisted.internet.interfaces.IReactorTime.getDelayedCalls}
"""
return self.calls
def advance(self, amount):
"""
Move time on this clock forward by the given amount and run whatever
pending calls should be run.
@type amount: C{float}
@param amount: The number of seconds which to advance this clock's
time.
"""
self.rightNow += amount
self._sortCalls()
while self.calls and self.calls[0].getTime() <= self.seconds():
call = self.calls.pop(0)
call.called = 1
call.func(*call.args, **call.kw)
self._sortCalls()
def pump(self, timings):
"""
Advance incrementally by the given set of times.
@type timings: iterable of C{float}
"""
for amount in timings:
self.advance(amount)
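# A testing sketch (not part of Twisted itself): Clock lets a test drive time
# deterministically instead of waiting on the real reactor.
#
#     clock = Clock()
#     lc = LoopingCall(work)   # 'work' is whatever callable is under test
#     lc.clock = clock         # documented hook; see LoopingCall.clock above
#     lc.start(5.0, now=False)
#     clock.advance(5.0)       # 'work' has now been called exactly once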
def deferLater(clock, delay, callable, *args, **kw):
"""
Call the given function after a certain period of time has passed.
@type clock: L{IReactorTime} provider
@param clock: The object which will be used to schedule the delayed
call.
@type delay: C{float} or C{int}
@param delay: The number of seconds to wait before calling the function.
@param callable: The object to call after the delay.
@param *args: The positional arguments to pass to C{callable}.
@param **kw: The keyword arguments to pass to C{callable}.
@rtype: L{defer.Deferred}
@return: A deferred that fires with the result of the callable when the
specified time has elapsed.
"""
def deferLaterCancel(deferred):
delayedCall.cancel()
d = defer.Deferred(deferLaterCancel)
d.addCallback(lambda ignored: callable(*args, **kw))
delayedCall = clock.callLater(delay, d.callback, None)
return d
__all__ = [
'LoopingCall',
'Clock',
'SchedulerStopped', 'Cooperator', 'coiterate',
'deferLater',
]
| Varriount/Colliberation | libs/twisted/internet/task.py | Python | mit | 24,723 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-21 12:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('slug', models.SlugField(allow_unicode=True, unique=True)),
('description', models.TextField(blank=True, default='')),
('description_html', models.TextField(blank=True, default='', editable=False)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='GroupMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='groups.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_groups', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='group',
name='members',
field=models.ManyToManyField(through='groups.GroupMember', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='groupmember',
unique_together=set([('group', 'user')]),
),
]
| srijannnd/Login-and-Register-App-in-Django | simplesocial/groups/migrations/0001_initial.py | Python | mit | 1,868 |
from network import WLAN
###############################################################################
# Settings for WLAN STA mode
###############################################################################
WLAN_MODE = 'off'
#WLAN_SSID = ''
#WLAN_AUTH = (WLAN.WPA2,'')
###############################################################################
# LoRaWAN Configuration
###############################################################################
# May be either 'otaa', 'abp', or 'off'
LORA_MODE = 'otaa'
# Settings for mode 'otaa'
LORA_OTAA_EUI = '70B3D57EF0001ED4'
LORA_OTAA_KEY = None # See README.md for instructions!
# Settings for mode 'abp'
#LORA_ABP_DEVADDR = ''
#LORA_ABP_NETKEY = ''
#LORA_ABP_APPKEY = ''
# Interval between measures transmitted to TTN.
# Measured airtime of transmission is 56.6 ms, fair use policy limits us to
# 30 seconds per day (= roughly 500 messages). We default to a 180 second
# interval (=480 messages / day).
LORA_SEND_RATE = 180
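# Checking the arithmetic above: 30 s / 0.0566 s of airtime per message allows
# roughly 530 messages per day, and one message every 180 s gives
# 24 * 60 * 60 / 180 = 480 messages per day, safely inside that budget.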
###############################################################################
# GNSS Configuration
###############################################################################
GNSS_UART_PORT = 1
GNSS_UART_BAUD = 9600
GNSS_ENABLE_PIN = 'P8'
| ttn-be/ttnmapper | config.py | Python | mit | 1,297 |
from django.dispatch import Signal
user_email_bounced = Signal() # args: ['bounce', 'should_deactivate']
email_bounced = Signal() # args: ['bounce', 'should_deactivate']
email_unsubscribed = Signal() # args: ['email', 'reference']
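# A hypothetical receiver sketch (names below are illustrative, not part
# of this module) showing how the documented args arrive:
#
#     from django.dispatch import receiver
#
#     @receiver(user_email_bounced)
#     def deactivate_on_bounce(sender, bounce, should_deactivate, **kwargs):
#         if should_deactivate:
#             ...  # e.g. flag the address as undeliverable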
| fin/froide | froide/bounce/signals.py | Python | mit | 236 |
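# Read whitespace-separated integers from stdin and print their sum.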
a = [int(i) for i in input().split()]
print(sum(a))
| maisilex/Lets-Begin-Python | list.py | Python | mit | 52 |
from django.conf import settings
from django.conf.urls import static
from django.urls import include, path, re_path
from django.contrib import admin
urlpatterns = [
path(r"admin/", admin.site.urls),
path(r"flickr/", include("ditto.flickr.urls")),
path(r"lastfm/", include("ditto.lastfm.urls")),
path(r"pinboard/", include("ditto.pinboard.urls")),
path(r"twitter/", include("ditto.twitter.urls")),
path(r"", include("ditto.core.urls")),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
re_path(r"^__debug__/", include(debug_toolbar.urls)),
]
urlpatterns += static.static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static.static(
settings.STATIC_URL, document_root=settings.STATIC_ROOT
)
| philgyford/django-ditto | devproject/devproject/urls.py | Python | mit | 795 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from os import listdir
import os
import re
import sys
from argparse import ArgumentParser
import random
import subprocess
from math import sqrt
import ast
from adderror import adderror
"""ENSAMBLE, -d directory -n number of models """
"""-k number of selected structure"""
"""-r repet of program"""
files = []
pdb_files = []
exp_file = []
list_of_random_items_modified = []
list_of_random_items = []
selected_files_for_ensamble = []
def argument():
parser = ArgumentParser()
parser.add_argument("-d", "--dir", dest="myDirVariable",
help="Choose dir", metavar="DIR", required=True)
parser.add_argument("-n", metavar='N', type=int,
dest="number_of_selected_files",
help="Number of selected structure",
required=True)
parser.add_argument("-k", metavar='K', type=int,
dest="k_number_of_options",
help="Number of possibility structure, less then selected files",
required=True)
parser.add_argument("-q", metavar='Q', type=int,
dest="mixing_koeficient", help="Mixing koeficient",
default=1)
parser.add_argument("-r", metavar='R', type=int,
dest="repeat", help="Number of repetitions",
default=1)
parser.add_argument("--verbose", help="increase output verbosity",
action="store_true")
args = parser.parse_args()
global files
global list_of_random_items_modified
files = listdir(args.myDirVariable)
list_of_random_items_modified = [None]*args.k_number_of_options
return(args)
def rmsd_pymol(structure_1, structure_2):
with open("file_for_pymol.pml", "w") as file_for_pymol:
file_for_pymol.write("""
load {s1}
load {s2}
align {s3}, {s4}
quit
""".format(s1=structure_1, s2=structure_2,
s3=os.path.splitext(structure_1)[0],
s4=os.path.splitext(structure_2)[0]))
out_pymol = subprocess.check_output(" pymol -c file_for_pymol.pml | grep Executive:", shell=True)
#part for home:
out_pymol = subprocess.check_output(" pymol -c file_for_pymol.pml | grep Executive:", shell=True)
#part for META:out_pymol = subprocess.check_output("module add pymol-1.8.2.1-gcc; pymol -c file_for_pymol.pml | grep Executive:;module rm pymol-1.8.2.1-gcc", shell=True)
rmsd = float(out_pymol[out_pymol.index(b'=')+1:out_pymol.index(b'(')-1])
print('RMSD ', structure_1, ' and ', structure_2, ' = ', rmsd)
return rmsd
def searching_pdb():
for line in files:
line = line.rstrip()
if re.search('.pdb$', line):
#if re.search('.pdb.dat', line):
pdb_files.append(line)
#if re.search('exp.dat', line):
#print('experimental file', line)
# exp_file.append(line)
total_number_of_pdb_files = len(pdb_files)
return(total_number_of_pdb_files)
def argument_processing(args, total_number_of_pdb_files):
#print(args)
    print('Parameters')
print('Total number of pdb files', total_number_of_pdb_files)
if total_number_of_pdb_files < args.number_of_selected_files:
print("Number od pdb files is ", total_number_of_pdb_files)
sys.exit(0)
if args.k_number_of_options > args.number_of_selected_files:
print("Number of selected structure is only", args.number_of_selected_files)
sys.exit(0)
if args.mixing_koeficient != 1:
print ("For q>1 is not implemented now \n")
sys.exit(0)
print('Files from directory', args.myDirVariable)
    print('The number of selected files',
          args.number_of_selected_files)
print('The number of selected options', args.k_number_of_options)
print('All pdb.dat files \n', pdb_files)
global selected_files_for_ensamble
selected_files_for_ensamble = random.sample(pdb_files,
args.number_of_selected_files)
print('Randomly selected files: \n', selected_files_for_ensamble)
global list_of_random_items
list_of_random_items = random.sample(selected_files_for_ensamble,
args.k_number_of_options)
    print('Randomly selected options: \n', list_of_random_items)
def using_adderror():
for i in range(args.k_number_of_options):
list_of_random_items_modified[i] = adderror("exp.dat",list_of_random_items[i]+'.dat')
str1 = ''.join(str(e)+"\n" for e in list_of_random_items_modified)
str2 = ''.join(str(e)+"\n" for e in list_of_random_items)
print(str1)
print(str2)
return(str1, str2)
def find_index(strings):
    value_of_index = dict()  # map structure name -> its index in the ensemble
    for e in list_of_random_items:
        value_of_index[e] = selected_files_for_ensamble.index(e)
print(selected_files_for_ensamble.index(e))
with open("input_for_ensamble_fit", "w") as f:
f.write(strings[0])
def ensamble_fit():
    ensable_output = [None] * args.k_number_of_options
    for i in range(args.k_number_of_options):
command = "/storage/brno3-cerit/home/krab1k/saxs-ensamble-fit/core/ensamble-fit -L -p /storage/brno2/home/petrahrozkova/SAXS/mod -n " + str(args.number_of_selected_files) + " -m /storage/brno2/home/petrahrozkova/SAXS/" +list_of_random_items_modified[i]+".dat"
subprocess.call(command,shell=True)
ensable_output[i] = result_rmsd()
return(ensable_output)
def result_rmsd():
with open('result', 'r') as f:
(f.readline())
result = f.readline()
values_of_index_result = result.split(',')[4:]
return(values_of_index_result)
def pymol_processing(ensable_output):
sum_rmsd = 0
values_of_index_result = ensable_output[0]
dictionary_index_and_structure = dict()
for i, j in enumerate(selected_files_for_ensamble):
dictionary_index_and_structure[i] = j
for i, j in enumerate(values_of_index_result):
f = float(j)
if f != 0:
computed_rmsd = rmsd_pymol(selected_files_for_ensamble[i],
list_of_random_items[0])
print('Adjusted rmsd ', f*computed_rmsd, '\n')
sum_rmsd += f*computed_rmsd
print('Sum of RMSD', sum_rmsd)
if __name__ == '__main__':
args = argument()
total_number_of_pdb_files = searching_pdb()
for i in range(args.repeat):
argument_processing(args, total_number_of_pdb_files)
strings = using_adderror()
#find_index(strings)
        # ensamble_output = ensamble_fit()
        ensamble_output = [None] * 2
        ensamble_output[0] = result_rmsd()
        if args.k_number_of_options == 1:
pymol_processing(ensamble_output)
else:
print("not implemented")
| spirit01/SAXS | test_caxs.py | Python | mit | 6,842 |
import collections
puzzle_input = (0,13,1,8,6,15)
test_inputs = [
([(0,3,6), 10], 0),
([(1,3,2)], 1),
([(2,1,3)], 10),
([(1,2,3)], 27),
([(2,3,1)], 78),
([(3,2,1)], 438),
([(3,1,2)], 1836),
# Expensive Tests
# ([(0,3,6), 30000000], 175594),
# ([(1,3,2), 30000000], 2578),
# ([(2,1,3), 30000000], 3544142),
# ([(1,2,3), 30000000], 261214),
# ([(2,3,1), 30000000], 6895259),
# ([(3,2,1), 30000000], 18),
# ([(3,1,2), 30000000], 362),
]
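# How a round works (illustrative walk-through of the first test case, not
# part of the puzzle input): each turn looks at the previously spoken
# number. If it had never been spoken before, the next number is 0;
# otherwise it is the gap (in turns) since it was last spoken. Starting
# from 0,3,6: turn 4 -> 0 (6 was new), turn 5 -> 3 (0 last heard 3 turns
# earlier), turn 6 -> 3, turn 7 -> 1, turn 8 -> 0, ...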
def iterate(input_, iterations=2020) -> int:
turn = 0
turn_last_spoken = collections.defaultdict(int)
prev_number = None
for value in input_:
turn_last_spoken[prev_number] = turn
prev_number = value
turn += 1
while turn < iterations:
current_number = turn_last_spoken[prev_number]
turn_last_spoken[prev_number] = turn
if current_number != 0:
current_number = turn - current_number
prev_number = current_number
turn += 1
return prev_number
for _input, expected_output in test_inputs:
print("Testing:", *_input, "...")
actual_output = iterate(*_input)
assert actual_output == expected_output, f"Expected: {expected_output}. Actual {actual_output}"
print("Part 1:", iterate(puzzle_input))
print("Part 2:", iterate(puzzle_input, 30000000))
| AustinTSchaffer/DailyProgrammer | AdventOfCode/2020/day_15/solution.py | Python | mit | 1,352 |
#!/usr/bin/env python
import os
import time
import argparse
import tempfile
import PyPDF2
import datetime
from reportlab.pdfgen import canvas
parser = argparse.ArgumentParser("Add signatures to PDF files")
parser.add_argument("pdf", help="The pdf file to annotate")
parser.add_argument("signature", help="The signature file (png, jpg)")
parser.add_argument("--date", action='store_true')
parser.add_argument("--output", nargs='?',
help="Output file. Defaults to input filename plus '_signed'")
parser.add_argument("--coords", nargs='?', default='2x100x100x125x40',
help="Coordinates to place signature. Format: PAGExXxYxWIDTHxHEIGHT. 1x200x300x125x40 means page 1, 200 units horizontally from the bottom left, 300 units vertically from the bottom left, 125 units wide, 40 units tall. Pages count starts at 1 (1-based indexing). Units are pdf-standard units (1/72 inch).")
def _get_tmp_filename(suffix=".pdf"):
with tempfile.NamedTemporaryFile(suffix=".pdf") as fh:
return fh.name
def sign_pdf(args):
#TODO: use a gui or something.... for now, just trial-and-error the coords
page_num, x1, y1, width, height = [int(a) for a in args.coords.split("x")]
page_num -= 1
output_filename = args.output or "{}_signed{}".format(
*os.path.splitext(args.pdf)
)
pdf_fh = open(args.pdf, 'rb')
sig_tmp_fh = None
pdf = PyPDF2.PdfFileReader(pdf_fh)
writer = PyPDF2.PdfFileWriter()
sig_tmp_filename = None
for i in range(0, pdf.getNumPages()):
page = pdf.getPage(i)
if i == page_num:
# Create PDF for signature
sig_tmp_filename = _get_tmp_filename()
c = canvas.Canvas(sig_tmp_filename, pagesize=page.cropBox)
c.drawImage(args.signature, x1, y1, width, height, mask='auto')
if args.date:
c.drawString(x1 + width, y1, datetime.datetime.now().strftime("%Y-%m-%d"))
c.showPage()
c.save()
# Merge PDF in to original page
sig_tmp_fh = open(sig_tmp_filename, 'rb')
sig_tmp_pdf = PyPDF2.PdfFileReader(sig_tmp_fh)
sig_page = sig_tmp_pdf.getPage(0)
sig_page.mediaBox = page.mediaBox
page.mergePage(sig_page)
writer.addPage(page)
with open(output_filename, 'wb') as fh:
writer.write(fh)
for handle in [pdf_fh, sig_tmp_fh]:
if handle:
handle.close()
if sig_tmp_filename:
os.remove(sig_tmp_filename)
def main():
sign_pdf(parser.parse_args())
if __name__ == "__main__":
main()
| yourcelf/signpdf | signpdf.py | Python | mit | 2,594 |
#!/usr/bin/python
import pexpect
import sys
import logging
import vt102
import os
import time
def termcheck(child, timeout=0):
time.sleep(0.05)
try:
logging.debug("Waiting for EOF or timeout=%d"%timeout)
child.expect(pexpect.EOF, timeout=timeout)
except pexpect.exceptions.TIMEOUT:
logging.debug("Hit timeout and have %d characters in child.before"%len(child.before))
return child.before
def termkey(child, stream, screen, key, timeout=0):
logging.debug("Sending '%s' to child"%key)
child.send(key)
s = termcheck(child)
logging.debug("Sending child.before text to vt102 stream")
stream.process(child.before)
logging.debug("vt102 screen dump")
logging.debug(screen)
# START LOGGING
logging.basicConfig(filename='menu_demo.log',level=logging.DEBUG)
# SETUP VT102 EMULATOR
#rows, columns = os.popen('stty size', 'r').read().split()
rows, columns = (50,120)
stream=vt102.stream()
screen=vt102.screen((int(rows), int(columns)))
screen.attach(stream)
logging.debug("Setup vt102 with %d %d"%(int(rows),int(columns)))
logging.debug("Starting demo2.py child process...")
child = pexpect.spawn('./demo2.py', maxread=65536, dimensions=(int(rows),int(columns)))
s = termcheck(child)
logging.debug("Sending child.before (len=%d) text to vt102 stream"%len(child.before))
stream.process(child.before)
logging.debug("vt102 screen dump")
logging.debug(screen)
termkey(child, stream, screen, "a")
termkey(child, stream, screen, "1")
logging.debug("Quiting...")
| paulboal/pexpect-curses | demo/demo3.py | Python | mit | 1,476 |
# -*- coding: utf-8 -*-
import json
from axe.http_exceptions import BadJSON
def get_request(request):
return request
def get_query(request):
return request.args
def get_form(request):
return request.form
def get_body(request):
return request.data
def get_headers(request):
return request.headers
def get_cookies(request):
return request.cookies
def get_method(request):
return request.method
def get_json(headers, body):
content_type = headers.get('Content-Type')
if content_type != 'application/json':
return
data = body.decode('utf8')
try:
return json.loads(data)
except ValueError:
raise BadJSON
| soasme/axe | axe/default_exts.py | Python | mit | 680 |
def tryprint():
    return ('it will be okay')
#!/usr/bin/env python
import time
from nicfit.aio import Application
async def _main(args):
print(args)
print("Sleeping 2...")
time.sleep(2)
print("Sleeping 0...")
return 0
def atexit():
print("atexit")
app = Application(_main, atexit=atexit)
app.arg_parser.add_argument("--example", help="Example cli")
app.run()
assert not"will not execute"
| nicfit/nicfit.py | examples/asyncio_example.py | Python | mit | 371 |
# -*- coding: utf-8 -*-
"""
Organization Registry - Controllers
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
return s3db.cms_index(module, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
"""
Module homepage for non-Admin users when no CMS content found
"""
# @ToDo: Move this to the Template (separate deployment_setting or else a customise for non-REST controllers)
template = settings.get_template()
if template == "SandyRelief":
# Just redirect to the Facilities
redirect(URL(f="facility"))
else:
# Just redirect to the list of Organisations
redirect(URL(f="organisation"))
# -----------------------------------------------------------------------------
def group():
""" RESTful CRUD controller """
return s3_rest_controller(rheader = s3db.org_rheader)
# -----------------------------------------------------------------------------
def region():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sector():
""" RESTful CRUD controller """
# Pre-processor
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def subsector():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def site():
"""
RESTful CRUD controller
- used by S3SiteAutocompleteWidget
which doesn't yet support filtering to just updateable sites
- used by site_contact_person()
- used by S3OptionsFilter (e.g. Asset Log)
"""
# Pre-processor
def prep(r):
if r.representation != "json" and \
r.method not in ("search_ac", "search_address_ac", "site_contact_person"):
return False
# Location Filter
s3db.gis_location_filter(r)
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sites_for_org():
"""
Used to provide the list of Sites for an Organisation
- used in User Registration
"""
try:
org = request.args[0]
except:
result = current.xml.json_message(False, 400, "No Org provided!")
else:
stable = s3db.org_site
if settings.get_org_branches():
# Find all branches for this Organisation
btable = s3db.org_organisation_branch
query = (btable.organisation_id == org) & \
(btable.deleted != True)
rows = db(query).select(btable.branch_id)
org_ids = [row.branch_id for row in rows] + [org]
query = (stable.organisation_id.belongs(org_ids)) & \
(stable.deleted != True)
else:
query = (stable.organisation_id == org) & \
(stable.deleted != True)
rows = db(query).select(stable.site_id,
stable.name,
orderby=stable.name)
result = rows.json()
finally:
response.headers["Content-Type"] = "application/json"
return result
# -----------------------------------------------------------------------------
def facility():
""" RESTful CRUD controller """
return s3db.org_facility_controller()
# -----------------------------------------------------------------------------
def facility_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def office_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
return s3db.org_organisation_controller()
# -----------------------------------------------------------------------------
def org_search():
"""
Organisation REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller(module, "organisation")
# -----------------------------------------------------------------------------
def organisation_list_represent(l):
organisation_represent = s3db.org_organisation_represent
if l:
max_length = 4
if len(l) > max_length:
return "%s, etc" % \
organisation_represent.multiple(l[:max_length])
else:
return organisation_represent.multiple(l)
else:
return NONE
# -----------------------------------------------------------------------------
def office():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
return s3db.org_office_controller()
# -----------------------------------------------------------------------------
def person():
""" Person controller for AddPersonWidget """
def prep(r):
if r.representation != "s3json":
# Do not serve other representations here
return False
else:
current.xml.show_ids = True
return True
s3.prep = prep
return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def room():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def mailing_list():
""" RESTful CRUD controller """
tablename = "pr_group"
table = s3db[tablename]
# Only groups with a group_type of 5
s3.filter = (table.group_type == 5)
table.group_type.writable = False
table.group_type.readable = False
table.name.label = T("Mailing List Name")
s3.crud_strings[tablename] = s3.pr_mailing_list_crud_strings
# define the list_fields
list_fields = s3db.configure(tablename,
list_fields = ["id",
"name",
"description",
])
# Components
_rheader = s3db.pr_rheader
_tabs = [(T("Organization"), "organisation/"),
(T("Mailing List Details"), None),
]
if len(request.args) > 0:
_tabs.append((T("Members"), "group_membership"))
if "viewing" in request.vars:
tablename, record_id = request.vars.viewing.rsplit(".", 1)
if tablename == "org_organisation":
table = s3db[tablename]
_rheader = s3db.org_rheader
_tabs = []
s3db.add_components("pr_group", pr_group_membership="group_id")
rheader = lambda r: _rheader(r, tabs = _tabs)
return s3_rest_controller("pr",
"group",
rheader=rheader)
# -----------------------------------------------------------------------------
def donor():
""" RESTful CRUD controller """
tablename = "org_donor"
table = s3db[tablename]
tablename = "org_donor"
s3.crud_strings[tablename] = Storage(
label_create = ADD_DONOR,
title_display = T("Donor Details"),
title_list = T("Donors Report"),
title_update = T("Edit Donor"),
label_list_button = T("List Donors"),
label_delete_button = T("Delete Donor"),
msg_record_created = T("Donor added"),
msg_record_modified = T("Donor updated"),
msg_record_deleted = T("Donor deleted"),
msg_list_empty = T("No Donors currently registered"))
s3db.configure(tablename, listadd=False)
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def resource():
""" RESTful CRUD controller """
def prep(r):
if r.interactive:
if r.method in ("create", "update"):
# Context from a Profile page?"
table = r.table
location_id = request.get_vars.get("(location)", None)
if location_id:
field = table.location_id
field.default = location_id
field.readable = field.writable = False
organisation_id = request.get_vars.get("(organisation)", None)
if organisation_id:
field = table.organisation_id
field.default = organisation_id
field.readable = field.writable = False
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def resource_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def service():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def req_match():
""" Match Requests for Sites """
return s3db.req_match()
# -----------------------------------------------------------------------------
def incoming():
"""
Incoming Shipments for Sites
@unused
"""
return inv_incoming()
# -----------------------------------------------------------------------------
def facility_geojson():
"""
Create GeoJSON[P] of Facilities for use by a high-traffic website
- controller just for testing
- function normally run on a schedule
"""
s3db.org_facility_geojson()
# END =========================================================================
| code-for-india/sahana_shelter_worldbank | controllers/org.py | Python | mit | 10,621 |
# coding: utf8
from wsgidav.dav_provider import DAVCollection, DAVNonCollection
from wsgidav.dav_error import DAVError, HTTP_FORBIDDEN
from wsgidav import util
from wsgidav.addons.tracim import role, MyFileStream
from time import mktime
from datetime import datetime
from os.path import normpath, dirname, basename
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class Root(DAVCollection):
def __init__(self, path, environ):
super(Root, self).__init__(path, environ)
def __repr__(self):
return 'Root folder'
def getCreationDate(self):
return mktime(datetime.now().timetuple())
def getDisplayName(self):
return 'Tracim - Home'
def getLastModified(self):
return mktime(datetime.now().timetuple())
def getMemberNames(self):
return self.provider.get_all_workspaces(only_name=True)
def getMember(self, workspace_name):
workspace = self.provider.get_workspace({'label': workspace_name})
if not self.provider.has_right(
self.environ["http_authenticator.username"],
workspace.workspace_id,
role["READER"]
):
return None
return Workspace(self.path + workspace.label, self.environ, workspace)
def createEmptyResource(self, name):
raise DAVError(HTTP_FORBIDDEN)
def createCollection(self, name):
raise DAVError(HTTP_FORBIDDEN)
def getMemberList(self):
memberlist = []
for name in self.getMemberNames():
member = self.getMember(name)
if member is not None:
memberlist.append(member)
return memberlist
class Workspace(DAVCollection):
def __init__(self, path, environ, workspace):
super(Workspace, self).__init__(path, environ)
self.workspace = workspace
def __repr__(self):
return "Workspace: %s" % self.workspace.label
def getCreationDate(self):
return mktime(self.workspace.created.timetuple())
def getDisplayName(self):
return self.workspace.label
def getLastModified(self):
return mktime(self.workspace.updated.timetuple())
def getMemberNames(self):
return self.provider.get_workspace_children_id(self.workspace)
def getMember(self, item_id):
item = self.provider.get_item({'id': item_id, 'child_revision_id': None})
if not self.provider.has_right(
self.environ["http_authenticator.username"],
item.workspace_id,
role["READER"]
):
return None
return Folder(self.path + item.item_name, self.environ, item)
def createEmptyResource(self, name):
raise DAVError(HTTP_FORBIDDEN)
def createCollection(self, name):
assert "/" not in name
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.workspace.workspace_id,
role["CONTENT_MANAGER"]
):
raise DAVError(HTTP_FORBIDDEN)
item = self.provider.add_item(
item_name=name,
item_type="FOLDER",
workspace_id=self.workspace.workspace_id
)
return Folder(self.path + name, self.environ, item)
def delete(self):
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.workspace.workspace_id,
role["WORKSPACE_MANAGER"]
):
raise DAVError(HTTP_FORBIDDEN)
self.provider.delete_workspace(self.workspace)
self.removeAllLocks(True)
def copyMoveSingle(self, destpath, ismove):
if ismove:
self.provider.set_workspace_label(self.workspace, basename(normpath(destpath)))
else:
self.provider.add_workspace(basename(normpath(destpath)))
def supportRecursiveMove(self, destpath):
return True
def moveRecursive(self, destpath):
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.workspace.workspace_id,
role["WORKSPACE_MANAGER"]
) or dirname(normpath(destpath)) != '/':
raise DAVError(HTTP_FORBIDDEN)
self.provider.set_workspace_label(self.workspace, basename(normpath(destpath)))
def setLastModified(self, destpath, timestamp, dryrun):
return False
def getMemberList(self):
memberlist = []
for name in self.getMemberNames():
member = self.getMember(name)
if member is not None:
memberlist.append(member)
return memberlist
class Folder(DAVCollection):
def __init__(self, path, environ, item):
super(Folder, self).__init__(path, environ)
self.item = item
def __repr__(self):
return "Folder: %s" % self.item.item_name
def getCreationDate(self):
return mktime(self.item.created.timetuple())
def getDisplayName(self):
return self.item.item_name
def getLastModified(self):
return mktime(self.item.updated.timetuple())
def getMemberNames(self):
return self.provider.get_item_children(self.item.id)
def getMember(self, item_id):
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.item.workspace_id,
role["READER"]
):
return None
item = self.provider.get_item({'id': item_id, 'child_revision_id': None})
return self.provider.getResourceInst(self.path + item.item_name, self.environ)
def createEmptyResource(self, name):
assert "/" not in name
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.item.workspace_id,
role["CONTRIBUTOR"]
):
raise DAVError(HTTP_FORBIDDEN)
item = self.provider.add_item(
item_name=name,
item_type="FILE",
workspace_id=self.item.workspace_id,
parent_id=self.item.id
)
return File(self.path + name, self.environ, item)
def createCollection(self, name):
assert "/" not in name
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.item.workspace_id,
role["CONTENT_MANAGER"]
):
raise DAVError(HTTP_FORBIDDEN)
item = self.provider.add_item(
item_name=name,
item_type="FOLDER",
workspace_id=self.item.workspace_id,
parent_id=self.item.id
)
return Folder(self.path + name, self.environ, item)
def delete(self):
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.item.workspace_id,
role["CONTENT_MANAGER"]
):
raise DAVError(HTTP_FORBIDDEN)
self.provider.delete_item(self.item)
self.removeAllLocks(True)
def copyMoveSingle(self, destpath, ismove):
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.item.workspace_id,
role["CONTENT_MANAGER"]
) or dirname(normpath(destpath)) == '/':
raise DAVError(HTTP_FORBIDDEN)
if ismove:
self.provider.move_item(self.item, destpath)
else:
self.provider.copy_item(self.item, destpath)
def supportRecursiveMove(self, destpath):
return True
def moveRecursive(self, destpath):
self.copyMoveSingle(destpath, True)
def setLastModified(self, destpath, timestamp, dryrun):
return False
def getMemberList(self, copyOrMove=False):
memberlist = []
for name in self.getMemberNames():
member = self.getMember(name)
if member is not None:
memberlist.append(member)
print "j'ai : ", copyOrMove
if memberlist != [] and not copyOrMove:
memberlist.append(HistoryFolder(self.path + ".history", self.environ, self.item))
return memberlist
def getDescendants(self, collections=True, resources=True,
depthFirst=False, depth="infinity", addSelf=False, copyOrMove=False):
assert depth in ("0", "1", "infinity")
res = []
if addSelf and not depthFirst:
res.append(self)
if depth != "0" and self.isCollection:
for child in self.getMemberList(copyOrMove):
want = (collections and child.isCollection) or (resources and not child.isCollection)
if want and not depthFirst:
res.append(child)
if child.isCollection and depth == "infinity":
res.extend(child.getDescendants(collections, resources, depthFirst, depth, addSelf=False, copyOrMove=copyOrMove))
if want and depthFirst:
res.append(child)
if addSelf and depthFirst:
res.append(self)
return res
class HistoryFolder(Folder):
def __init__(self, path, environ, item):
super(HistoryFolder, self).__init__(path, environ, item)
def __repr__(self):
return "Folder history of : %s" % self.item.item_name
def getCreationDate(self):
return mktime(datetime.now().timetuple())
def getDisplayName(self):
return '.history'
def getLastModified(self):
return mktime(datetime.now().timetuple())
def getMember(self, item_id):
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.item.workspace_id,
role["READER"]
):
return None
item = self.provider.get_item({'id': item_id, 'child_revision_id': None})
if item.item_type == 'FOLDER':
return None
return HistoryFileFolder(self.path + item.item_name, self.environ, item)
def createEmptyResource(self, name):
raise DAVError(HTTP_FORBIDDEN)
def createCollection(self, name):
raise DAVError(HTTP_FORBIDDEN)
def handleDelete(self):
return True
def handleCopy(self, destPath, depthInfinity):
return True
def handleMove(self, destPath):
return True
def setLastModified(self, destpath, timestamp, dryrun):
return False
def getMemberList(self, copyOrMove=False):
memberlist = []
for name in self.getMemberNames():
member = self.getMember(name)
if member is not None:
memberlist.append(member)
return memberlist
class HistoryFileFolder(HistoryFolder):
def __init__(self, path, environ, item):
super(HistoryFileFolder, self).__init__(path, environ, item)
def __repr__(self):
return "File folder history of : %s" % self.item.item_name
def getCreationDate(self):
return mktime(datetime.now().timetuple())
def getDisplayName(self):
return self.item.item_name
def createCollection(self, name):
raise DAVError(HTTP_FORBIDDEN)
def getLastModified(self):
return mktime(datetime.now().timetuple())
def getMemberNames(self):
return self.provider.get_all_revisions_from_item(self.item, only_id=True)
def getMember(self, item_id):
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.item.workspace_id,
role["READER"]):
return None
item = self.provider.get_item({'id': item_id})
if item.item_type in ["FILE"]:
return HistoryFile(self.path + str(item.id) + '-' + item.item_name , self.environ, item)
else:
return HistoryOtherFile(self.path + str(item.id) + '-' + item.item_name, self.environ, item)
class File(DAVNonCollection):
def __init__(self, path, environ, item):
super(File, self).__init__(path, environ)
self.item = item
self.filestream = MyFileStream(self.provider, self.item)
def __repr__(self):
return "File: %s" % self.item.item_name
def getContentLength(self):
return len(self.item.item_content)
def getContentType(self):
return util.guessMimeType(self.item.item_name)
def getCreationDate(self):
return mktime(self.item.created.timetuple())
def getDisplayName(self):
return self.item.item_name
def getLastModified(self):
return mktime(self.item.updated.timetuple())
def getContent(self):
filestream = StringIO()
filestream.write(self.item.item_content)
filestream.seek(0)
return filestream
def beginWrite(self, contentType=None):
return self.filestream
def delete(self):
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.item.workspace_id,
role["CONTENT_MANAGER"]
):
raise DAVError(HTTP_FORBIDDEN)
self.provider.delete_item(self.item)
self.removeAllLocks(True)
def copyMoveSingle(self, destpath, ismove):
if not self.provider.has_right(
self.environ["http_authenticator.username"],
self.provider.get_workspace_id_from_path(destpath),
role["CONTRIBUTOR"]
) or not self.provider.has_right(
self.environ["http_authenticator.username"],
self.item.workspace_id,
role["READER"]
) or dirname(normpath(destpath)) == '/' \
or dirname(dirname(normpath(destpath))) == '/':
raise DAVError(HTTP_FORBIDDEN)
if ismove:
self.provider.move_all_revisions(self.item, destpath)
else:
self.provider.copy_item(self.item, destpath)
def supportRecursiveMove(self, dest):
return True
def moveRecursive(self, destpath):
self.copyMoveSingle(destpath, True)
def setLastModified(self, dest, timestamp, dryrun):
return False
class HistoryFile(File):
def __init__(self, path, environ, item):
super(HistoryFile, self).__init__(path, environ, item)
def __repr__(self):
return "File history: %s-%s" % (self.item.item_name, self.item.id)
def getDisplayName(self):
return str(self.item.id) + '-' + self.item.item_name
def beginWrite(self, contentType=None):
raise DAVError(HTTP_FORBIDDEN)
def delete(self):
raise DAVError(HTTP_FORBIDDEN)
def handleDelete(self):
return True
def handleCopy(self, destPath, depthInfinity):
return True
def handleMove(self, destPath):
return True
def copyMoveSingle(self, destpath, ismove):
raise DAVError(HTTP_FORBIDDEN)
class OtherFile(File):
def __init__(self, path, environ, item):
super(OtherFile, self).__init__(path, environ, item)
self.content = self.design(self.item.item_content)
def __repr__(self):
return "File: %s" % self.item.item_name
def getContentLength(self):
return len(self.content)
def getContentType(self):
return 'text/html'
def getContent(self):
filestream = StringIO()
filestream.write(self.content)
filestream.seek(0)
return filestream
def design(self, content):
f = open('wsgidav/addons/tracim/style.css', 'r')
style = f.read()
f.close()
file = '''
<html>
<head>
<title>Hey</title>
<style>%s</style>
</head>
<body>
<div>
%s
</div>
</body>
</html>
''' % (style, content)
return file
class HistoryOtherFile(OtherFile):
def __init__(self, path, environ, item):
super(HistoryOtherFile, self).__init__(path, environ, item)
self.content = self.design(self.item.item_content)
def __repr__(self):
return "File history: %s-%s" % (self.item.item_name, self.item.id)
def getDisplayName(self):
return str(self.item.id) + '-' + self.item.item_name
def beginWrite(self, contentType=None):
raise DAVError(HTTP_FORBIDDEN)
def delete(self):
raise DAVError(HTTP_FORBIDDEN)
def handleDelete(self):
return True
def handleCopy(self, destPath, depthInfinity):
return True
def handleMove(self, destPath):
return True
def copyMoveSingle(self, destpath, ismove):
raise DAVError(HTTP_FORBIDDEN)
| tracim/tracim-webdav | wsgidav/addons/tracim/sql_resources.py | Python | mit | 16,929 |
# coding=utf-8
# Bootstrap installation of setuptools
from ez_setup import use_setuptools
use_setuptools()
import os
import sys
from fnmatch import fnmatchcase
from distutils.util import convert_path
from propane_distribution import cmdclassdict
from setuptools import setup, find_packages
from engineer import version
PROJECT = 'engineer'
################################################################################
# find_package_data written by Ian Bicking.
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
This function is by Ian Bicking.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if fnmatchcase(name, pattern) or fn.lower() == pattern.lower():
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
################################################################################
# noinspection PyShadowingBuiltins
def get_install_requirements(requirements_file='requirements.txt'):
requirements = []
with open(requirements_file) as file:
temp = file.readlines()
        temp = [i.rstrip('\n') for i in temp]
for line in temp:
if line is None or line == '' or line.startswith(('#', '-e', '-r')):
continue
else:
requirements.append(line)
return requirements
# noinspection PyShadowingBuiltins
def get_readme():
with open('README.md') as file:
return file.read()
setup(
name=PROJECT,
version=version.string,
author='Tyler Butler',
author_email='[email protected]',
platforms='any',
packages=find_packages(),
entry_points={
'console_scripts': [
'engineer=engineer.engine:cmdline',
'engineer_dev=engineer.devtools:main [dev]'
],
},
url='http://github.com/tylerbutler/engineer',
license='MIT',
description='A static website generator.',
long_description=get_readme(),
install_requires=get_install_requirements(),
tests_require=get_install_requirements('requirements_tests.txt'),
extras_require={
'dev': ['argh', 'clint']
},
cmdclass=cmdclassdict,
include_package_data=True,
package_data=find_package_data(PROJECT,
package=PROJECT,
only_in_packages=False),
    # zip_safe=False would install unzipped (no egg archive) - easier to debug and hack on
zip_safe=True,
)
| tylerbutler/engineer | setup.py | Python | mit | 5,374 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._test_base import TestBase
__all__ = ['TestBase']
| Azure/azure-sdk-for-python | sdk/testbase/azure-mgmt-testbase/azure/mgmt/testbase/aio/__init__.py | Python | mit | 524 |
# Ant
#
# Copyright (c) 2012, Gustav Tiger <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import threading
import logging
import Queue
from ant.base.ant import Ant
from ant.base.message import Message
from ant.easy.channel import Channel
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("garmin.ant.easy.node")
class Node():
def __init__(self):
self._responses_cond = threading.Condition()
self._responses = collections.deque()
self._event_cond = threading.Condition()
self._events = collections.deque()
self._datas = Queue.Queue()
self.channels = {}
self.ant = Ant()
self._running = True
self._worker_thread = threading.Thread(target=self._worker, name="ant.easy")
self._worker_thread.start()
def new_channel(self, ctype):
channel = Channel(0, self, self.ant)
self.channels[0] = channel
channel._assign(ctype, 0x00)
return channel
def request_message(self, messageId):
_logger.debug("requesting message %#02x", messageId)
self.ant.request_message(0, messageId)
_logger.debug("done requesting message %#02x", messageId)
return self.wait_for_special(messageId)
def set_network_key(self, network, key):
self.ant.set_network_key(network, key)
return self.wait_for_response(Message.ID.SET_NETWORK_KEY)
def wait_for_event(self, ok_codes):
return wait_for_event(ok_codes, self._events, self._event_cond)
def wait_for_response(self, event_id):
return wait_for_response(event_id, self._responses, self._responses_cond)
def wait_for_special(self, event_id):
return wait_for_special(event_id, self._responses, self._responses_cond)
def _worker_response(self, channel, event, data):
self._responses_cond.acquire()
self._responses.append((channel, event, data))
self._responses_cond.notify()
self._responses_cond.release()
def _worker_event(self, channel, event, data):
if event == Message.Code.EVENT_RX_BURST_PACKET:
self._datas.put(('burst', channel, data))
elif event == Message.Code.EVENT_RX_BROADCAST:
self._datas.put(('broadcast', channel, data))
else:
self._event_cond.acquire()
self._events.append((channel, event, data))
self._event_cond.notify()
self._event_cond.release()
def _worker(self):
self.ant.response_function = self._worker_response
self.ant.channel_event_function = self._worker_event
# TODO: check capabilities
self.ant.start()
def _main(self):
while self._running:
try:
(data_type, channel, data) = self._datas.get(True, 1.0)
self._datas.task_done()
if data_type == 'broadcast':
self.channels[channel].on_broadcast_data(data)
elif data_type == 'burst':
self.channels[channel].on_burst_data(data)
else:
_logger.warning("Unknown data type '%s': %r", data_type, data)
except Queue.Empty as e:
pass
def start(self):
self._main()
def stop(self):
if self._running:
_logger.debug("Stoping ant.easy")
self._running = False
self.ant.stop()
self._worker_thread.join()
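# A minimal usage sketch (the zeroed network key is a placeholder for a
# real ANT network key):
#
#     node = Node()
#     node.set_network_key(0x00, [0x00] * 8)
#     channel = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
#     channel.open()
#     node.start()  # blocks; dispatches broadcast/burst data to the channel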
| ddboline/Garmin-Forerunner-610-Extractor_fork | ant/easy/node.py | Python | mit | 4,653 |
import os
from ConfigParser import ConfigParser
from amlib import argp
tmp_conf = ConfigParser()
tmp_path = os.path.dirname(os.path.abspath(__file__)) # /base/lib/here
tmp_path = tmp_path.split('/')
conf_path = '/'.join(tmp_path[0:-1]) # /base/lib
tmp_conf.read(conf_path+'/ampush.conf')
c = {}
c.update(tmp_conf.items('default'))
# select target AD container: default or user-specified with --mode?
if argp.a['mode'] is not None:
try:
container_conf_key = 'am_container_' + argp.a['mode']
c['am_container'] = c[container_conf_key]
except KeyError:
log_msg = 'Terminating. No such parameter in ampush.conf: ' + \
container_conf_key
raise Exception(log_msg)
else:
c['am_container'] = c['am_container_default']
# select alternate flat file automount maps: default or user-specified
# set passed via --source?
if argp.a['source'] is not None:
try:
ff_map_dir_conf_key = 'flat_file_map_dir_' + argp.a['source']
c['flat_file_map_dir'] = c[ff_map_dir_conf_key]
except KeyError:
log_msg = 'Terminating. No such parameter in ampush.conf: ' + \
ff_map_dir_conf_key
raise Exception(log_msg)
else:
c['flat_file_map_dir'] = c['flat_file_map_dir_default']
| sfu-rcg/ampush | amlib/conf.py | Python | mit | 1,274 |
# encoding: utf-8
from __future__ import unicode_literals
import operator
import pytest
from marrow.mongo import Filter
from marrow.schema.compat import odict, py3
@pytest.fixture
def empty_ops(request):
return Filter()
@pytest.fixture
def single_ops(request):
return Filter({'roll': 27})
def test_ops_iteration(single_ops):
assert list(iter(single_ops)) == ['roll']
class TestOpsMapping(object):
def test_getitem(self, empty_ops, single_ops):
with pytest.raises(KeyError):
empty_ops['roll']
assert single_ops['roll'] == 27
def test_setitem(self, empty_ops):
assert repr(empty_ops) == "Filter([])"
empty_ops['meaning'] = 42
if py3:
assert repr(empty_ops) == "Filter([('meaning', 42)])"
else:
assert repr(empty_ops) == "Filter([(u'meaning', 42)])"
def test_delitem(self, empty_ops, single_ops):
with pytest.raises(KeyError):
del empty_ops['roll']
if py3:
assert repr(single_ops) == "Filter([('roll', 27)])"
else:
assert repr(single_ops) == "Filter([(u'roll', 27)])"
del single_ops['roll']
assert repr(single_ops) == "Filter([])"
def test_length(self, empty_ops, single_ops):
assert len(empty_ops) == 0
assert len(single_ops) == 1
def test_keys(self, empty_ops, single_ops):
assert list(empty_ops.keys()) == []
assert list(single_ops.keys()) == ['roll']
def test_items(self, empty_ops, single_ops):
assert list(empty_ops.items()) == []
assert list(single_ops.items()) == [('roll', 27)]
def test_values(self, empty_ops, single_ops):
assert list(empty_ops.values()) == []
assert list(single_ops.values()) == [27]
def test_contains(self, single_ops):
assert 'foo' not in single_ops
assert 'roll' in single_ops
def test_equality_inequality(self, empty_ops, single_ops):
assert empty_ops == {}
assert empty_ops != {'roll': 27}
assert single_ops != {}
assert single_ops == {'roll': 27}
def test_get(self, single_ops):
assert single_ops.get('foo') is None
assert single_ops.get('foo', 42) == 42
assert single_ops.get('roll') == 27
def test_clear(self, single_ops):
assert len(single_ops.operations) == 1
single_ops.clear()
assert len(single_ops.operations) == 0
def test_pop(self, single_ops):
assert len(single_ops.operations) == 1
with pytest.raises(KeyError):
single_ops.pop('foo')
assert single_ops.pop('foo', 42) == 42
assert len(single_ops.operations) == 1
assert single_ops.pop('roll') == 27
assert len(single_ops.operations) == 0
def test_popitem(self, single_ops):
assert len(single_ops.operations) == 1
assert single_ops.popitem() == ('roll', 27)
assert len(single_ops.operations) == 0
with pytest.raises(KeyError):
single_ops.popitem()
def test_update(self, empty_ops, single_ops):
assert len(empty_ops.operations) == 0
empty_ops.update(name="Bob Dole")
assert len(empty_ops.operations) == 1
if py3:
assert repr(empty_ops) == "Filter([('name', 'Bob Dole')])"
else:
assert repr(empty_ops) == "Filter([('name', u'Bob Dole')])"
assert len(single_ops.operations) == 1
if py3:
assert repr(single_ops) == "Filter([('roll', 27)])"
else:
assert repr(single_ops) == "Filter([(u'roll', 27)])"
single_ops.update([('name', "Bob Dole")])
assert len(single_ops.operations) == 2
if py3:
assert repr(single_ops) in ("Filter([('roll', 27), ('name', 'Bob Dole')])", "Filter([('name', 'Bob Dole'), ('roll', 27)])")
else:
assert repr(single_ops) in ("Filter([(u'roll', 27), (u'name', u'Bob Dole')])", "Filter([(u'name', u'Bob Dole'), (u'roll', 27)])")
def test_setdefault(self, empty_ops):
assert len(empty_ops.operations) == 0
empty_ops.setdefault('fnord', 42)
assert len(empty_ops.operations) == 1
assert empty_ops.operations['fnord'] == 42
empty_ops.setdefault('fnord', 27)
assert len(empty_ops.operations) == 1
assert empty_ops.operations['fnord'] == 42
def test_ops_shallow_copy(self, single_ops):
assert single_ops.operations == single_ops.copy().operations
class TestOperationsCombination(object):
def test_operations_and_clean_merge(self):
comb = Filter({'roll': 27}) & Filter({'foo': 42})
assert comb.as_query == {'roll': 27, 'foo': 42}
def test_operations_and_operator_overlap(self):
comb = Filter({'roll': {'$gte': 27}}) & Filter({'roll': {'$lte': 42}})
assert comb.as_query == {'roll': {'$gte': 27, '$lte': 42}}
def test_paradoxical_condition(self):
comb = Filter({'roll': 27}) & Filter({'roll': {'$lte': 42}})
assert comb.as_query == {'roll': {'$eq': 27, '$lte': 42}}
comb = Filter({'roll': {'$gte': 27}}) & Filter({'roll': 42})
assert list(comb.as_query['roll'].items()) in ([('$gte', 27), ('$eq', 42)], [('$eq', 42), ('$gte', 27)])
def test_operations_or_clean_merge(self):
comb = Filter({'roll': 27}) | Filter({'foo': 42})
assert comb.as_query == {'$or': [{'roll': 27}, {'foo': 42}]}
comb = comb | Filter({'bar': 'baz'})
assert comb.as_query == {'$or': [{'roll': 27}, {'foo': 42}, {'bar': 'baz'}]}
def test_operations_hard_and(self):
comb = Filter({'$and': [{'a': 1}, {'b': 2}]}) & Filter({'$and': [{'c': 3}]})
assert comb.as_query == {'$and': [{'a': 1}, {'b': 2}, {'c': 3}]}
def test_operations_soft_and(self):
comb = Filter({'$and': [{'a': 1}, {'b': 2}]}) & Filter({'c': 3})
assert comb.as_query == {'$and': [{'a': 1}, {'b': 2}], 'c': 3}
| marrow/mongo | test/query/test_ops.py | Python | mit | 5,358 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancersOperations:
"""LoadBalancersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
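    # A usage sketch (resource names are placeholders): with an
    # authenticated aio NetworkManagementClient, the returned poller can be
    # awaited to completion.
    #
    #     poller = await client.load_balancers.begin_delete(
    #         "my-resource-group", "my-load-balancer")
    #     await poller.result()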
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.LoadBalancer":
"""Gets the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancer, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.LoadBalancer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
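    # Usage sketch (illustrative; `client` and resource names as above):
    #
    #     lb = await client.load_balancers.get("my-rg", "my-lb")
    #     print(lb.name)  # deserialized LoadBalancer model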
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
parameters: "_models.LoadBalancer",
**kwargs: Any
) -> "_models.LoadBalancer":
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LoadBalancer')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
parameters: "_models.LoadBalancer",
**kwargs: Any
) -> AsyncLROPoller["_models.LoadBalancer"]:
"""Creates or updates a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param parameters: Parameters supplied to the create or update load balancer operation.
:type parameters: ~azure.mgmt.network.v2016_09_01.models.LoadBalancer
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either LoadBalancer or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2016_09_01.models.LoadBalancer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
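    # Usage sketch (illustrative; `client` as above, `LoadBalancer` imported
    # from the v2016_09_01 models; the payload is a minimal hypothetical one):
    #
    #     poller = await client.load_balancers.begin_create_or_update(
    #         "my-rg", "my-lb", LoadBalancer(location="eastus"))
    #     lb = await poller.result()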
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerListResult"]:
"""Gets all the load balancers in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_09_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'} # type: ignore
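    # Usage sketch (illustrative; `client` as above). Note that `list_all` is
    # not awaited: it returns an AsyncItemPaged that is iterated asynchronously.
    #
    #     async for lb in client.load_balancers.list_all():
    #         print(lb.name)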
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerListResult"]:
"""Gets all the load balancers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_09_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_09_01/aio/operations/_load_balancers_operations.py | Python | mit | 23,464 |
"""Contains tests for oweb.views.updates.item_update"""
# Python imports
from unittest import skip
# Django imports
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.contrib.auth.models import User
# app imports
from oweb.tests import OWebViewTests
from oweb.models.account import Account
from oweb.models.research import Research
from oweb.models.ship import Ship
from oweb.models.planet import Planet, Moon
from oweb.models.building import Building
from oweb.models.defense import Defense
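# The oweb:item_update view exercised below accepts POST data of the form
# (inferred from the test cases in this module; illustrative summary only):
#
#     {'item_type': one of 'research', 'ship', 'building', 'moon_building',
#                   'defense', 'moon_defense',
#      'item_id': <model pk>,
#      'item_level': <new level, or count for ships/defense>}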
@override_settings(AUTH_USER_MODEL='auth.User')
class OWebViewsItemUpdateTests(OWebViewTests):
def test_login_required(self):
"""Unauthenticated users should be redirected to oweb:app_login"""
r = self.client.get(reverse('oweb:item_update'))
self.assertRedirects(r,
reverse('oweb:app_login'),
status_code=302,
target_status_code=200)
def test_account_owner(self):
"""Can somebody update an item he doesn't posess?"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
res_pre = Research.objects.filter(account=acc).first()
self.client.login(username='test02', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'research',
'item_id': res_pre.id,
'item_level': res_pre.level + 1 },
HTTP_REFERER=reverse('oweb:account_research',
args=[acc.id]))
self.assertEqual(r.status_code, 403)
self.assertTemplateUsed(r, 'oweb/403.html')
def test_no_post(self):
"""What if no POST data is supplied?"""
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'))
self.assertEqual(r.status_code, 500)
self.assertTemplateUsed(r, 'oweb/500.html')
def test_research_update(self):
"""Does ``item_update()`` correctly update researches?
Basically the Django ORM can be trusted, but since there is some logic
        involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
res_pre = Research.objects.filter(account=acc).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'research',
'item_id': res_pre.id,
'item_level': res_pre.level + 1 },
HTTP_REFERER=reverse('oweb:account_research',
args=[acc.id]))
self.assertRedirects(r,
reverse('oweb:account_research', args=[acc.id]),
status_code=302,
target_status_code=200)
res_post = Research.objects.get(pk=res_pre.pk)
self.assertEqual(res_pre.level + 1, res_post.level)
def test_ship_update(self):
"""Does ``item_update()`` correctly update ships?
Basically the Django ORM can be trusted, but since there is some logic
        involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
ship_pre = Ship.objects.filter(account=acc).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'ship',
'item_id': ship_pre.id,
'item_level': ship_pre.count + 1338 },
HTTP_REFERER=reverse('oweb:account_ships',
args=[acc.id]))
self.assertRedirects(r,
reverse('oweb:account_ships', args=[acc.id]),
status_code=302,
target_status_code=200)
ship_post = Ship.objects.get(pk=ship_pre.pk)
self.assertEqual(ship_pre.count + 1338, ship_post.count)
def test_building_update(self):
"""Does ``item_update()`` correctly update buildings?
Basically the Django ORM can be trusted, but since there is some logic
        involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).first()
b_pre = Building.objects.filter(astro_object=p).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'building',
'item_id': b_pre.id,
'item_level': b_pre.level - 1 },
HTTP_REFERER=reverse('oweb:planet_buildings',
args=[p.id]))
self.assertRedirects(r,
reverse('oweb:planet_buildings', args=[p.id]),
status_code=302,
target_status_code=200)
b_post = Building.objects.get(pk=b_pre.pk)
self.assertEqual(b_pre.level - 1, b_post.level)
def test_moon_building_update(self):
"""Does ``item_update()`` correctly update moon buildings?
Basically the Django ORM can be trusted, but since there is some logic
        involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).values_list('id', flat=True)
m = Moon.objects.filter(planet__in=p).first()
b_pre = Building.objects.filter(astro_object=m).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'moon_building',
'item_id': b_pre.id,
'item_level': b_pre.level + 2 },
HTTP_REFERER=reverse('oweb:moon_buildings',
args=[m.id]))
self.assertRedirects(r,
reverse('oweb:moon_buildings', args=[m.id]),
status_code=302,
target_status_code=200)
b_post = Building.objects.get(pk=b_pre.pk)
self.assertEqual(b_pre.level + 2, b_post.level)
def test_defense_update(self):
"""Does ``item_update()`` correctly update defense devices?
Basically the Django ORM can be trusted, but since there is some logic
        involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).first()
d_pre = Defense.objects.filter(astro_object=p).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'defense',
'item_id': d_pre.id,
'item_level': d_pre.count - 1 },
HTTP_REFERER=reverse('oweb:planet_defense',
args=[p.id]))
self.assertRedirects(r,
reverse('oweb:planet_defense', args=[p.id]),
status_code=302,
target_status_code=200)
d_post = Defense.objects.get(pk=d_pre.pk)
self.assertEqual(d_pre.count - 1, d_post.count)
def test_moon_defense_update(self):
"""Does ``item_update()`` correctly update moon defense devices?
Basically the Django ORM can be trusted, but since there is some logic
        involved in determining the correct field to update, this test is
included
"""
u = User.objects.get(username='test01')
acc = Account.objects.get(owner=u)
p = Planet.objects.filter(account=acc).values_list('id', flat=True)
m = Moon.objects.filter(planet__in=p).first()
d_pre = Defense.objects.filter(astro_object=m).first()
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={ 'item_type': 'moon_defense',
'item_id': d_pre.id,
'item_level': d_pre.count - 10000 },
HTTP_REFERER=reverse('oweb:moon_defense',
args=[m.id]))
self.assertRedirects(r,
reverse('oweb:moon_defense', args=[m.id]),
status_code=302,
target_status_code=200)
d_post = Defense.objects.get(pk=d_pre.pk)
self.assertEqual(0, d_post.count)
def test_unknown_item_type(self):
"""Does ``item_update()`` correctly handle unknown item_types?"""
self.client.login(username='test01', password='foo')
r = self.client.post(reverse('oweb:item_update'),
data={
'item_type': 'foobar',
'item_id': 1,
'item_level': 1
})
self.assertEqual(r.status_code, 500)
self.assertTemplateUsed(r, 'oweb/500.html')
| Mischback/django-oweb | oweb/tests/views/item_update.py | Python | mit | 10,246 |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.prod')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| monovertex/ygorganizer | manage.py | Python | mit | 247 |
import os
import sys
import pygame
import signal
import time
import ConfigParser
from twython import TwythonStreamer
#-----------------------------------------------------------------------------
# Import custom modules
#-----------------------------------------------------------------------------
# Add pyscope module to path
path = os.path.join(os.path.dirname(__file__), 'py_apps/pyscope')
sys.path.append(path)
# Add twit_feed module to path
path = os.path.join(os.path.dirname(__file__), '../py_apps/twit_feed')
sys.path.append(path)
import pyscope
import twit_feed
#import tf_test_02
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
MAX_ENTRIES = 1
FPS = 5
BET_TERM = ['#testing', '#blargz'] #['@Gr8AmTweetRace']
AUTH = { 'app_key': 'li8wn8Tb7xBifCnNIgyqUw',
'app_secret': 'vcwq36w4C4VXamlqWBDKM2E8etsOoangDoMhxNDU',
'oauth_token': '1969690717-rGw3VkRQ8IyL4OcPWtv5Y2CeBdVn8ndJrjGKraI',
'oauth_token_secret': 'KO7YIFMKWKaYTtz2zEyaSy044ixj5kIbWrDtZZL96ly0H'}
# Common colors
WHITE = 255,255,255
GREEN = 0,255,0
BLACK = 0,0,0
BLUE = 0,0,255
RED = 255,0,0
#-----------------------------------------------------------------------------
# Global Variables
#-----------------------------------------------------------------------------
g_terms = []
g_bet_loop = None
g_scope = None
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
# Handle graphics on the screen
def draw_starting_screen():
global g_terms
global g_scope
# Create fonts
font_mode = pygame.font.Font(None, 68)
font_title_1 = pygame.font.Font(None, 68)
font_title_2 = pygame.font.Font(None, 68)
font_instr_1 = pygame.font.Font(None, 36)
font_instr_2 = pygame.font.Font(None, 36)
font_ent_title = pygame.font.Font(None, 36)
font_ent = pygame.font.Font(None, 36)
# Create background
rect_bg = pygame.draw.rect(g_scope.screen, BLACK, \
(0, 0, 540, 960), 0)
rect_title = pygame.draw.rect(g_scope.screen, WHITE, \
(20, 20, 500, 100), 0)
rect_game_mode = pygame.draw.rect(g_scope.screen, WHITE, \
(20, 140, 500, 60), 0)
rect_instructions = pygame.draw.rect(g_scope.screen, WHITE, \
(20, 220, 500, 100), 0)
rect_tweets = pygame.draw.rect(g_scope.screen, WHITE, \
(20, 340, 500, 300), 0)
# Draw title
title1 = "The Great American"
title2 = "Tweet Race"
text_title_1 = font_title_1.render(title1,1,BLACK)
text_title_2 = font_title_2.render(title2,1,BLACK)
g_scope.screen.blit(text_title_1, (40, 25))
g_scope.screen.blit(text_title_2, (130, 70))
# Draw game mode
mode_str = font_mode.render('Starting Gate',1,BLACK)
g_scope.screen.blit(mode_str, (115, 140))
# Draw instructions
instr_str_1 = 'Send a tweet to @Gr8AmTweetRace'
instr_str_2 = 'with a #term to enter!'
instr_1 = font_instr_1.render(instr_str_1,1,BLACK)
instr_2 = font_instr_2.render(instr_str_2,1,BLACK)
g_scope.screen.blit(instr_1, (40, 240))
g_scope.screen.blit(instr_2, (40, 270))
# Draw entrants
ent_title = font_ent_title.render('Contestants',1,BLACK)
g_scope.screen.blit(ent_title, (40, 360))
ent_y = 390
for i in range(0, MAX_ENTRIES):
ent_str = ''.join([str(i + 1), ': '])
if i < len(g_terms):
ent_str = ''.join([ent_str, g_terms[i]])
ent_disp = font_ent.render(ent_str,1,BLACK)
g_scope.screen.blit(ent_disp, (40, 390 + (i * 30)))
# Test if a term is already in the term list
def is_in_terms(entry):
global g_terms
for term in g_terms:
if ''.join(['#', entry]) == term:
return True
return False
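# Note: a set would give O(1) membership checks here; the linear scan is fine
# for a cap as small as MAX_ENTRIES.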
#-----------------------------------------------------------------------------
# Main
#-----------------------------------------------------------------------------
def main():
global g_bet_loop
global g_scope
global g_terms
# Setup Twitter streamer
tf = twit_feed.TwitFeed(AUTH)
#tf = tf_test_02.TwitFeed(AUTH)
# Tweet that we are accepting bets
# Start streamer to search for terms
tf.start_track_streamer(BET_TERM)
# Setup display
pygame.init()
    g_scope = pyscope.pyscope()  # required: draw_starting_screen() uses g_scope.screen
fps_clock = pygame.time.Clock()
pygame.mouse.set_visible(False)
# Main game loop
    g_bet_loop = True
while g_bet_loop:
# Handle game events
for event in pygame.event.get():
# End game if quit event raises
if event.type == pygame.QUIT:
g_bet_loop = False
# End game if 'q' or 'esc' key pressed
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q or event.key == pygame.K_ESCAPE:
g_bet_loop = False
# Get entries and print them
entries = tf.get_entries()
for entry in entries:
print entry
if is_in_terms(entry) == False:
g_terms.append(''.join(['#', entry]))
print len(g_terms)
if len(g_terms) >= MAX_ENTRIES:
print 'breaking'
g_bet_loop = False
# Update screen
draw_starting_screen()
pygame.display.update()
fps_clock.tick(FPS)
# Clean up Twitter feed and pygame
print str(pygame.time.get_ticks())
tf.stop_tracking()
print str(pygame.time.get_ticks())
pygame.quit()
# Print terms
print 'Search terms: ', g_terms
# Run main
if __name__ == '__main__':
    main()
| ShawnHymel/TweetRace | pytest/wager_test_01.py | Python | mit | 5,938 |
import asyncio
from abc import ABCMeta
from collections.abc import MutableMapping
from aiohttp import web
from aiohttp.web_request import Request
from aiohttp_session import get_session
from collections.abc import Sequence
AIOLOGIN_KEY = '__aiologin__'
ON_LOGIN = 1
ON_LOGOUT = 2
ON_AUTHENTICATED = 3
ON_FORBIDDEN = 4
ON_UNAUTHORIZED = 5
class AbstractUser(MutableMapping, metaclass=ABCMeta):
def __iter__(self):
return self.__dict__.__iter__()
def __len__(self):
return len(self.__dict__)
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
setattr(self, key, value)
def __delitem__(self, key):
delattr(self, key)
@property
def authenticated(self):
        raise NotImplementedError()
@property
def forbidden(self):
        raise NotImplementedError()
class AnonymousUser(AbstractUser):
@property
def authenticated(self):
return False
@property
def forbidden(self):
return False
# noinspection PyUnusedLocal
@asyncio.coroutine
def _unauthorized(*args, **kwargs):
raise web.HTTPUnauthorized()
# noinspection PyUnusedLocal
@asyncio.coroutine
def _forbidden(*args, **kwargs):
raise web.HTTPForbidden()
# noinspection PyUnusedLocal
@asyncio.coroutine
def _void(*args, **kwargs):
    raise NotImplementedError()
class AioLogin:
def __init__(self, request, session_name=AIOLOGIN_KEY, disabled=False,
auth_by_form=_void, auth_by_header=_void,
auth_by_session=_void, forbidden=_forbidden,
unauthorized=_unauthorized, anonymous_user=AnonymousUser,
session=get_session, signals=None):
self._request = request
self._disabled = disabled
self._session_name = session_name
self._anonymous_user = anonymous_user
self._session = session
self._auth_by_form = auth_by_form
self._auth_by_header = auth_by_header
self._auth_by_session = auth_by_session
self._unauthorized = unauthorized
self._forbidden = forbidden
self._on_login = []
self._on_logout = []
self._on_authenticated = []
self._on_forbidden = []
self._on_unauthorized = []
assert isinstance(signals, (type(None), Sequence)), \
"Excepted {!r} but received {!r}".format(Sequence, signals)
signals = [] if signals is None else signals
for sig in signals:
assert isinstance(sig, Sequence), \
"Excepted {!r} but received {!r}".format(Sequence, signals)
is_coro = asyncio.iscoroutinefunction(sig[1])
            assert len(sig) == 2 and 1 <= sig[0] <= 5 and is_coro, \
"Incorrectly formatted signal argument {}".format(sig)
if sig[0] == 1:
self._on_login.append(sig[1])
elif sig[0] == 2:
self._on_logout.append(sig[1])
elif sig[0] == 3:
self._on_authenticated.append(sig[1])
elif sig[0] == 4:
self._on_forbidden.append(sig[1])
elif sig[0] == 5:
self._on_unauthorized.append(sig[1])
@asyncio.coroutine
def authenticate(self, *args, remember=False, **kwargs):
assert isinstance(remember, bool), \
"Expected {!r} but received {!r}".format(type(bool), type(remember))
user = yield from self._auth_by_form(self._request, *args, **kwargs)
if user is None:
for coro in self._on_unauthorized:
yield from coro(self._request)
raise web.HTTPUnauthorized
for coro in self._on_authenticated:
yield from coro(self._request)
yield from self.login(user, remember=remember)
@asyncio.coroutine
def login(self, user, remember):
assert isinstance(user, AbstractUser), \
"Expected {} but received {}".format(type(AbstractUser), type(user))
assert isinstance(remember, bool), \
"Expected {!r} but received {!r}".format(type(bool), type(remember))
session = yield from self._session(self._request)
try:
session.remember = remember
except:
session['_remember'] = remember
session[self._session_name] = dict(user)
for coro in self._on_login:
yield from coro(self._request)
@asyncio.coroutine
def logout(self):
session = yield from self._session(self._request)
session.invalidate()
for coro in self._on_logout:
yield from coro(self._request)
@asyncio.coroutine
def auth_by_header(self):
key = self._request.headers.get('AUTHORIZATION', None)
if key is None:
return None
return (yield from self._auth_by_header(self._request, key))
@asyncio.coroutine
def auth_by_session(self):
session = yield from self._session(self._request)
profile = session.get(self._session_name, None)
if profile is None:
return None
user = yield from self._auth_by_session(self._request, profile)
if user is None:
return None
return user
@property
def on_login(self):
return self._on_login
@property
def disabled(self):
return self._disabled
@property
def unauthorized(self):
return self._unauthorized
@property
def forbidden(self):
return self._forbidden
@property
def anonymous_user(self):
return self._anonymous_user
def setup(app, **kwargs):
app.middlewares.append(middleware_factory(**kwargs))
def middleware_factory(**options):
# noinspection PyUnusedLocal
@asyncio.coroutine
def aiologin_middleware(app, handler):
@asyncio.coroutine
def aiologin_handler(*args, **kwargs):
request = kwargs['request'] if 'request' in kwargs else args[0]
kwargs = {k: v for (k, v) in kwargs.items() if k != 'request'}
# noinspection PyTypeChecker
manager = options.get('manager', AioLogin)
request.aiologin = manager(request=request, **options)
return (yield from handler(request=request, **kwargs))
return aiologin_handler
return aiologin_middleware
def secured(func):
@asyncio.coroutine
def wrapper(*args, **kwargs):
request = kwargs['request'] if 'request' in kwargs else args[0]
kwargs = {k: v for (k, v) in kwargs.items() if k != 'request'}
if not isinstance(request, Request):
request = args[0].request
elif request not in args:
args = (request,) + args
if request.aiologin.disabled:
return (yield from func(*args, **kwargs))
user = yield from request.aiologin.auth_by_header()
if user is None:
user = yield from request.aiologin.auth_by_session()
if user is None:
user = request.aiologin.anonymous_user()
assert isinstance(user, AbstractUser), \
"Expected 'user' of type AbstractUser by got {}".format(type(user))
if not user.authenticated:
# noinspection PyProtectedMember
for coro in request.aiologin._on_unauthorized:
yield from coro(request)
return (yield from request.aiologin.unauthorized(*args, **kwargs))
if user.forbidden:
# noinspection PyProtectedMember
for coro in request.aiologin._on_forbidden:
yield from coro(request)
return (yield from request.aiologin.forbidden(*args, **kwargs))
request.current_user = user
# noinspection PyProtectedMember
for coro in request.aiologin._on_authenticated:
yield from coro(request)
return (yield from func(*args, **kwargs))
return wrapper
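# Usage sketch (illustrative; assumes an aiohttp app with aiohttp_session
# configured and `form_auth`/`session_auth` coroutines that return an
# AbstractUser or None -- all names here are hypothetical):
#
#     app = web.Application()
#     setup(app, auth_by_form=form_auth, auth_by_session=session_auth)
#
#     @secured
#     @asyncio.coroutine
#     def profile(request):
#         return web.Response(text=request.current_user['name'])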
| trivigy/aiologin | aiologin/__init__.py | Python | mit | 7,901 |
# coding: utf-8
# pylint: disable=too-many-lines
import inspect
import sys
from typing import TypeVar, Optional, Sequence, Iterable, List, Any
from owlmixin import util
from owlmixin.errors import RequiredError, UnknownPropertiesError, InvalidTypeError
from owlmixin.owlcollections import TDict, TIterator, TList
from owlmixin.owlenum import OwlEnum, OwlObjectEnum
from owlmixin.transformers import (
DictTransformer,
JsonTransformer,
YamlTransformer,
ValueTransformer,
traverse_dict,
TOption,
)
T = TypeVar("T", bound="OwlMixin")
def _is_generic(type_):
return hasattr(type_, "__origin__")
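# e.g. _is_generic(TList[int]) is True because parameterized generics carry
# `__origin__`, while _is_generic(int) is False (behavior assumed for the
# Python 3.6+ typing semantics this module targets).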
def assert_extra(cls_properties, arg_dict, cls):
extra_keys: set = set(arg_dict.keys()) - {n for n, t in cls_properties}
if extra_keys:
raise UnknownPropertiesError(cls=cls, props=sorted(extra_keys))
def assert_none(value, type_, cls, name):
if value is None:
raise RequiredError(cls=cls, prop=name, type_=type_)
def assert_types(value, types: tuple, cls, name):
if not isinstance(value, types):
raise InvalidTypeError(cls=cls, prop=name, value=value, expected=types, actual=type(value))
def traverse(
type_, name, value, cls, force_snake_case: bool, force_cast: bool, restrict: bool
) -> Any:
# pylint: disable=too-many-return-statements,too-many-branches,too-many-arguments
if isinstance(type_, str):
type_ = sys.modules[cls.__module__].__dict__.get(type_)
if hasattr(type_, "__forward_arg__"):
# `_ForwardRef` (3.6) or `ForwardRef` (>= 3.7) includes __forward_arg__
# PEP 563 -- Postponed Evaluation of Annotations
type_ = sys.modules[cls.__module__].__dict__.get(type_.__forward_arg__)
if not _is_generic(type_):
assert_none(value, type_, cls, name)
if type_ is any:
return value
if type_ is Any:
return value
if isinstance(value, type_):
return value
if issubclass(type_, OwlMixin):
assert_types(value, (type_, dict), cls, name)
return type_.from_dict(
value, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
if issubclass(type_, ValueTransformer):
return type_.from_value(value)
if force_cast:
return type_(value)
assert_types(value, (type_,), cls, name)
return value
o_type = type_.__origin__
g_type = type_.__args__
if o_type == TList:
assert_none(value, type_, cls, name)
assert_types(value, (list,), cls, name)
return TList(
[
traverse(g_type[0], f"{name}.{i}", v, cls, force_snake_case, force_cast, restrict)
for i, v in enumerate(value)
]
)
if o_type == TIterator:
assert_none(value, type_, cls, name)
assert_types(value, (Iterable,), cls, name)
return TIterator(
traverse(g_type[0], f"{name}.{i}", v, cls, force_snake_case, force_cast, restrict)
for i, v in enumerate(value)
)
if o_type == TDict:
assert_none(value, type_, cls, name)
assert_types(value, (dict,), cls, name)
return TDict(
{
k: traverse(
g_type[0], f"{name}.{k}", v, cls, force_snake_case, force_cast, restrict
)
for k, v in value.items()
}
)
if o_type == TOption:
v = value.get() if isinstance(value, TOption) else value
        # TODO: For `from_csvf`... this needs to be simpler!!
if (isinstance(v, str) and v) or (not isinstance(v, str) and v is not None):
return TOption(
traverse(g_type[0], name, v, cls, force_snake_case, force_cast, restrict)
)
return TOption(None)
raise RuntimeError(f"This generics is not supported `{o_type}`")
class OwlMeta(type):
def __new__(cls, name, bases, class_dict):
ret_cls = type.__new__(cls, name, bases, class_dict)
ret_cls.__methods_dict__ = dict(inspect.getmembers(ret_cls, inspect.ismethod))
return ret_cls
class OwlMixin(DictTransformer, JsonTransformer, YamlTransformer, metaclass=OwlMeta):
@classmethod
def from_dict(
cls,
d: dict,
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> T:
"""From dict to instance
:param d: Dict
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human, Food, Japanese
>>> human: Human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... })
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].name
'Apple'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel'
You can use default value
>>> taro: Japanese = Japanese.from_dict({
... "name": 'taro'
... }) # doctest: +NORMALIZE_WHITESPACE
>>> taro.name
'taro'
>>> taro.language
'japanese'
If you don't set `force_snake=False` explicitly, keys are transformed to snake case as following.
>>> human: Human = Human.from_dict({
... "--id": 1,
... "<name>": "Tom",
... "favorites": [
... {"name": "Apple", "namesByLang": {"en": "Apple"}}
... ]
... })
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["en"]
'Apple'
You can allow extra parameters (like ``hogehoge``) if you set `restrict=False`.
>>> apple: Food = Food.from_dict({
... "name": "Apple",
... "hogehoge": "ooooooooooooooooooooo",
... }, restrict=False)
>>> apple.to_dict()
{'name': 'Apple'}
You can prohibit extra parameters (like ``hogehoge``) if you set `restrict=True` (which is default).
>>> human = Human.from_dict({
... "id": 1,
... "name": "Tom",
... "hogehoge1": "ooooooooooooooooooooo",
... "hogehoge2": ["aaaaaaaaaaaaaaaaaa", "iiiiiiiiiiiiiiiii"],
... "favorites": [
... {"name": "Apple", "namesByLang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... }) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
owlmixin.errors.UnknownPropertiesError:
. ∧,,_∧ ,___________________
⊂ ( ・ω・ )つ- < Unknown properties error
/// /::/ `-------------------
|::|/⊂ヽノ|::|」
/ ̄ ̄旦 ̄ ̄ ̄/|
______/ | |
|------ー----ー|/
<BLANKLINE>
`owlmixin.samples.Human` has unknown properties ['hogehoge1', 'hogehoge2']!!
<BLANKLINE>
* If you want to allow unknown properties, set `restrict=False`
* If you want to disallow unknown properties, add `hogehoge1` and `hogehoge2` to owlmixin.samples.Human
<BLANKLINE>
If you specify wrong type...
>>> human: Human = Human.from_dict({
... "id": 1,
... "name": "ichiro",
... "favorites": ["apple", "orange"]
... }) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
owlmixin.errors.InvalidTypeError:
. ∧,,_∧ ,___________________
⊂ ( ・ω・ )つ- < Invalid Type error
/// /::/ `-------------------
|::|/⊂ヽノ|::|」
/ ̄ ̄旦 ̄ ̄ ̄/|
______/ | |
|------ー----ー|/
<BLANKLINE>
`owlmixin.samples.Human#favorites.0 = apple` doesn't match expected types.
Expected type is one of ["<class 'owlmixin.samples.Food'>", "<class 'dict'>"], but actual type is `<class 'str'>`
<BLANKLINE>
* If you want to force cast, set `force_cast=True`
* If you don't want to force cast, specify value which has correct type
<BLANKLINE>
        If you don't specify required params... (e.g. name)
>>> human: Human = Human.from_dict({
... "id": 1
... }) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
owlmixin.errors.RequiredError:
. ∧,,_∧ ,___________________
⊂ ( ・ω・ )つ- < Required error
/// /::/ `-------------------
|::|/⊂ヽノ|::|」
/ ̄ ̄旦 ̄ ̄ ̄/|
______/ | |
|------ー----ー|/
<BLANKLINE>
`owlmixin.samples.Human#name: <class 'str'>` is empty!!
<BLANKLINE>
* If `name` is certainly required, specify anything.
* If `name` is optional, change type from `<class 'str'>` to `TOption[<class 'str'>]`
<BLANKLINE>
"""
if isinstance(d, cls):
return d
instance: T = cls() # type: ignore
d = util.replace_keys(d, {"self": "_self"}, force_snake_case)
properties = cls.__annotations__.items()
if restrict:
assert_extra(properties, d, cls)
for n, t in properties:
f = cls.__methods_dict__.get(f"_{cls.__name__}___{n}") # type: ignore
arg_v = f(d.get(n)) if f else d.get(n)
def_v = getattr(instance, n, None)
setattr(
instance,
n,
traverse(
type_=t,
name=n,
value=def_v if arg_v is None else arg_v,
cls=cls,
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
),
)
return instance
@classmethod
def from_optional_dict(
cls,
d: Optional[dict],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TOption[T]:
"""From dict to optional instance.
:param d: Dict
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dict(None).is_none()
True
>>> Human.from_optional_dict({}).get() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
owlmixin.errors.RequiredError:
. ∧,,_∧ ,___________________
⊂ ( ・ω・ )つ- < Required error
/// /::/ `-------------------
|::|/⊂ヽノ|::|」
/ ̄ ̄旦 ̄ ̄ ̄/|
______/ | |
|------ー----ー|/
<BLANKLINE>
`owlmixin.samples.Human#id: <class 'int'>` is empty!!
<BLANKLINE>
* If `id` is certainly required, specify anything.
* If `id` is optional, change type from `<class 'int'>` to `TOption[<class 'int'>]`
<BLANKLINE>
"""
return TOption(
cls.from_dict(
d, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
if d is not None
else None
)
@classmethod
def from_dicts(
cls,
ds: List[dict],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TList[T]:
"""From list of dict to list of instance
:param ds: List of dict
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TList[Human] = Human.from_dicts([
... {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... ])
>>> humans[0].name
'Tom'
>>> humans[1].name
'John'
"""
return TList(
[
cls.from_dict(
d, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
for d in ds
]
)
@classmethod
def from_iterable_dicts(
cls,
ds: Iterable[dict],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TIterator[T]:
"""From iterable dict to iterable instance
:param ds: Iterable dict
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterator
Usage:
>>> from owlmixin.samples import Human
>>> humans: TIterator[Human] = Human.from_iterable_dicts([
... {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... ])
>>> humans.next_at(0).get().name
'Tom'
>>> humans.next_at(0).get().name
'John'
"""
return TIterator(
cls.from_dict(
d, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
for d in ds
)
@classmethod
def from_optional_dicts(
cls,
ds: Optional[List[dict]],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TOption[TList[T]]:
"""From list of dict to optional list of instance.
:param ds: List of dict
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dicts(None).is_none()
True
>>> Human.from_optional_dicts([]).get()
[]
"""
return TOption(
cls.from_dicts(
ds, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
if ds is not None
else None
)
@classmethod
def from_optional_iterable_dicts(
cls,
ds: Optional[Iterable[dict]],
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TOption[TIterator[T]]:
"""From iterable dict to optional iterable instance.
:param ds: Iterable dict
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dicts(None).is_none()
True
>>> Human.from_optional_dicts([]).get()
[]
"""
return TOption(
cls.from_iterable_dicts(
ds, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
if ds is not None
else None
)
@classmethod
def from_dicts_by_key(
cls,
ds: dict,
*,
force_snake_case: bool = True,
force_cast: bool = False,
restrict: bool = True,
) -> TDict[T]:
"""From dict of dict to dict of instance
:param ds: Dict of dict
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Dict of instance
Usage:
>>> from owlmixin.samples import Human
>>> humans_by_name: TDict[Human] = Human.from_dicts_by_key({
... 'Tom': {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... 'John': {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... })
>>> humans_by_name['Tom'].name
'Tom'
>>> humans_by_name['John'].name
'John'
"""
return TDict(
{
k: cls.from_dict(
v, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
for k, v in ds.items()
}
)
@classmethod
def from_optional_dicts_by_key(
cls,
ds: Optional[dict],
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = True,
) -> TOption[TDict[T]]:
"""From dict of dict to optional dict of instance.
:param ds: Dict of dict
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Dict of instance
Usage:
>>> from owlmixin.samples import Human
>>> Human.from_optional_dicts_by_key(None).is_none()
True
>>> Human.from_optional_dicts_by_key({}).get()
{}
"""
return TOption(
cls.from_dicts_by_key(
ds, force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict
)
if ds is not None
else None
)
@classmethod
def from_json(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = False
) -> T:
"""From json string to instance
:param data: Json string
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> human: Human = Human.from_json('''{
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple", "de": "Apfel"}},
... {"name": "Orange"}
... ]
... }''')
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel'
"""
return cls.from_dict(
util.load_json(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_jsonf(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = False,
) -> T:
"""From json file path to instance
:param fpath: Json file path
:param encoding: Json file encoding
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
"""
return cls.from_dict(
util.load_jsonf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_json_to_list(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = False
) -> TList[T]:
"""From json string to list of instance
:param data: Json string
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TList[Human] = Human.from_json_to_list('''[
... {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... ]''')
>>> humans[0].name
'Tom'
>>> humans[1].name
'John'
"""
return cls.from_dicts(
util.load_json(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_json_to_iterator(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = False
) -> TIterator[T]:
"""From json string to iterable instance
:param data: Json string
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TIterator[Human] = Human.from_json_to_iterator('''[
... {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
... {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
... ]''')
>>> humans.next_at(1).get().name
'John'
>>> humans.next_at(0).is_none()
True
"""
return cls.from_iterable_dicts(
util.load_json(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_jsonf_to_list(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = False,
) -> TList[T]:
"""From json file path to list of instance
:param fpath: Json file path
:param encoding: Json file encoding
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
"""
return cls.from_dicts(
util.load_jsonf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_jsonf_to_iterator(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = False,
) -> TIterator[T]:
"""From json file path to iterable instance
:param fpath: Json file path
:param encoding: Json file encoding
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
"""
return cls.from_iterable_dicts(
util.load_jsonf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yaml(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = True
) -> T:
"""From yaml string to instance
:param data: Yaml string
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> human: Human = Human.from_yaml('''
... id: 1
... name: Tom
... favorites:
... - name: Apple
... names_by_lang:
... en: Apple
... de: Apfel
... - name: Orange
... ''')
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel'
"""
return cls.from_dict(
util.load_yaml(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yamlf(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = True,
) -> T:
"""From yaml file path to instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
"""
return cls.from_dict(
util.load_yamlf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yaml_to_list(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = True
) -> TList[T]:
"""From yaml string to list of instance
:param data: Yaml string
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TList[Human] = Human.from_yaml_to_list('''
... - id: 1
... name: Tom
... favorites:
... - name: Apple
... - id: 2
... name: John
... favorites:
... - name: Orange
... ''')
>>> humans[0].name
'Tom'
>>> humans[1].name
'John'
>>> humans[0].favorites[0].name
'Apple'
"""
return cls.from_dicts(
util.load_yaml(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yaml_to_iterator(
cls, data: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = True
) -> TIterator[T]:
"""From yaml string to iterable instance
:param data: Yaml string
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
Usage:
>>> from owlmixin.samples import Human
>>> humans: TIterator[Human] = Human.from_yaml_to_iterator('''
... - id: 1
... name: Tom
... favorites:
... - name: Apple
... - id: 2
... name: John
... favorites:
... - name: Orange
... ''')
>>> human1 = humans.next_at(1).get()
>>> human1.name
'John'
>>> humans.next_at(0).is_none()
True
>>> human1.favorites[0].name
'Orange'
"""
return cls.from_iterable_dicts(
util.load_yaml(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yamlf_to_list(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = True,
) -> TList[T]:
"""From yaml file path to list of instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
"""
return cls.from_dicts(
util.load_yamlf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_yamlf_to_iterator(
cls,
fpath: str,
encoding: str = "utf8",
*,
force_snake_case=True,
force_cast: bool = False,
restrict: bool = True,
) -> TIterator[T]:
"""From yaml file path to iterable instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Iterable instance
"""
return cls.from_iterable_dicts(
util.load_yamlf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
@classmethod
def from_csvf_to_list(
cls,
fpath: str,
fieldnames: Optional[Sequence[str]] = None,
encoding: str = "utf8",
*,
force_snake_case: bool = True,
restrict: bool = True,
) -> TList[T]:
"""From csv file path to list of instance
:param fpath: Csv file path
:param fieldnames: Specify csv header names if not included in the file
:param encoding: Csv file encoding
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param restrict: Prohibit extra parameters if True
:return: List of Instance
"""
return cls.from_dicts(
list(util.load_csvf(fpath, fieldnames, encoding)),
force_snake_case=force_snake_case,
force_cast=True,
restrict=restrict,
)
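    # Usage sketch (illustrative; the file name and CSV header below are
    # hypothetical):
    #     humans = Human.from_csvf_to_list("humans.csv")  # columns: id, name, ...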
@classmethod
def from_csvf_to_iterator(
cls,
fpath: str,
fieldnames: Optional[Sequence[str]] = None,
encoding: str = "utf8",
*,
force_snake_case: bool = True,
restrict: bool = True,
) -> TIterator[T]:
"""From csv file path to iterable instance
:param fpath: Csv file path
:param fieldnames: Specify csv header names if not included in the file
:param encoding: Csv file encoding
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param restrict: Prohibit extra parameters if True
:return: Iterable Instance
"""
return cls.from_iterable_dicts(
util.load_csvf(fpath, fieldnames, encoding),
force_snake_case=force_snake_case,
force_cast=True,
restrict=restrict,
)
@classmethod
def from_json_url(
cls, url: str, *, force_snake_case=True, force_cast: bool = False, restrict: bool = False
) -> T:
"""From url which returns json to instance
:param url: Url which returns json
        :param force_snake_case: Keys are transformed to snake case in order to comply with PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
"""
return cls.from_dict(
util.load_json_url(url),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict,
)
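# ----------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the library):
# assuming a YAML file 'humans.yml' shaped like the doctest above,
#
#     humans = Human.from_yamlf_to_list('humans.yml')   # TList[Human]
#     names = [h.name for h in humans]
#
# The force_snake_case / force_cast / restrict keyword arguments behave
# as documented on the string-based loaders.
# ----------------------------------------------------------------------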
| tadashi-aikawa/owlmixin | owlmixin/__init__.py | Python | mit | 34,064 |
"""Chapter 22 Practice Questions
Answers Chapter 22 Practice Questions via Python code.
"""
from pythontutorials.books.CrackingCodes.Ch18.vigenereCipher import decryptMessage
def main():
# 1. How many prime numbers are there?
# Hint: Check page 322
message = "Iymdi ah rv urxxeqfi fjdjqv gu gzuqw clunijh." # Encrypted with key "PRIMES"
#print(decryptMessage(blank, blank)) # Fill in the blanks
# 2. What are integers that are not prime called?
# Hint: Check page 323
message = "Vbmggpcw wlvx njr bhv pctqh emi psyzxf czxtrwdxr fhaugrd." # Encrypted with key "NOTCALLEDEVENS"
#print(decryptMessage(blank, blank)) # Fill in the blanks
# 3. What are two algorithms for finding prime numbers?
# Hint: Check page 323
# Encrypted with key "ALGORITHMS"
message = "Tsk hyzxl mdgzxwkpfz gkeo ob kpbz ngov gfv: bkpmd dtbwjqhu, eaegk cw Mkhfgsenseml, hzv Rlhwe-Ubsxwr."
#print(decryptMessage(blank, blank)) # Fill in the blanks
# If PracticeQuestions.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main()
| JoseALermaIII/python-tutorials | pythontutorials/books/CrackingCodes/Ch22/PracticeQuestions.py | Python | mit | 1,117 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from future.utils import with_metaclass
import numpy as np
import scipy as sp
from abc import ABCMeta, abstractmethod
from scipy import integrate
import scipy.interpolate as interpolate
from . import core
from . import refstate
__all__ = ['GammaEos','GammaCalc']
#====================================================================
# Base Class
#====================================================================
def set_calculator(eos_mod, kind, kind_opts):
assert kind in kind_opts, (
kind + ' is not a valid thermal calculator. '+
'You must select one of: ' + str(kind_opts))
eos_mod._kind = kind
if kind=='GammaPowLaw':
calc = _GammaPowLaw(eos_mod)
elif kind=='GammaShiftPowLaw':
calc = _GammaShiftPowLaw(eos_mod)
elif kind=='GammaFiniteStrain':
calc = _GammaFiniteStrain(eos_mod)
else:
raise NotImplementedError(kind+' is not a valid '+
'GammaEos Calculator.')
eos_mod._add_calculator(calc, calc_type='gamma')
pass
#====================================================================
class GammaEos(with_metaclass(ABCMeta, core.Eos)):
"""
EOS model for compression dependence of Grüneisen parameter.
    Notes
    -----
    Thermodynamic properties depend only on volume.
"""
_kind_opts = ['GammaPowLaw','GammaShiftPowLaw','GammaFiniteStrain']
def __init__(self, kind='GammaPowLaw', natom=1, model_state={}):
self._pre_init(natom=natom)
set_calculator(self, kind, self._kind_opts)
ref_compress_state='P0'
ref_thermal_state='T0'
ref_energy_type = 'E0'
refstate.set_calculator(self, ref_compress_state=ref_compress_state,
ref_thermal_state=ref_thermal_state,
ref_energy_type=ref_energy_type)
# self._set_ref_state()
self._post_init(model_state=model_state)
pass
def __repr__(self):
calc = self.calculators['gamma']
return ("GammaEos(kind={kind}, natom={natom}, "
"model_state={model_state}, "
")"
.format(kind=repr(calc.name),
natom=repr(self.natom),
model_state=self.model_state
)
)
def _set_ref_state(self):
calc = self.calculators['gamma']
path_const = calc.path_const
if path_const=='S':
param_ref_names = []
param_ref_units = []
param_ref_defaults = []
param_ref_scales = []
else:
raise NotImplementedError(
'path_const '+path_const+' is not valid for ThermalEos.')
self._path_const = calc.path_const
self._param_ref_names = param_ref_names
self._param_ref_units = param_ref_units
self._param_ref_defaults = param_ref_defaults
self._param_ref_scales = param_ref_scales
pass
def gamma(self, V_a):
gamma_a = self.calculators['gamma']._calc_gamma(V_a)
return gamma_a
def gamma_deriv(self, V_a):
gamma_deriv_a = self.calculators['gamma']._calc_gamma_deriv(V_a)
return gamma_deriv_a
def temp(self, V_a, T0=None):
temp_a = self.calculators['gamma']._calc_temp(V_a, T0=T0)
return temp_a
#====================================================================
class GammaCalc(with_metaclass(ABCMeta, core.Calculator)):
"""
    Abstract Equation of State class for a reference compression path.
    The path can be either isothermal (T=const) or adiabatic (S=const).
    For this restricted path, thermodynamic properties depend only on volume.
"""
def __init__(self, eos_mod):
self._eos_mod = eos_mod
self._init_params()
self._path_const = 'S'
pass
@property
def path_const( self ):
return self._path_const
####################
# Required Methods #
####################
@abstractmethod
def _init_params( self ):
"""Initialize list of calculator parameter names."""
pass
@abstractmethod
def _calc_gamma(self, V_a):
pass
@abstractmethod
def _calc_gamma_deriv(self, V_a):
pass
@abstractmethod
def _calc_temp(self, V_a, T0=None):
pass
def _calc_theta(self, V_a):
theta0 = self.eos_mod.get_param_values(param_names=['theta0'])
theta = self._calc_temp(V_a, T0=theta0)
return theta
####################
# Optional Methods #
####################
# EOS property functions
def _calc_param_deriv(self, fname, paramname, V_a, dxfrac=1e-6):
scale_a, paramkey_a = self.get_param_scale(apply_expand_adj=True )
scale = scale_a[paramkey_a==paramname][0]
# print 'scale: ' + np.str(scale)
#if (paramname is 'E0') and (fname is 'energy'):
# return np.ones(V_a.shape)
try:
fun = getattr(self, fname)
# Note that self is implicitly included
val0_a = fun(V_a)
except:
assert False, 'That is not a valid function name ' + \
'(e.g. it should be press or energy)'
try:
param = core.get_params([paramname])[0]
dparam = scale*dxfrac
# print 'param: ' + np.str(param)
# print 'dparam: ' + np.str(dparam)
except:
assert False, 'This is not a valid parameter name'
# set param value in eos_d dict
core.set_params([paramname,], [param+dparam,])
# Note that self is implicitly included
dval_a = fun(V_a) - val0_a
# reset param to original value
core.set_params([paramname], [param])
deriv_a = dval_a/dxfrac
return deriv_a
def _calc_energy_perturb(self, V_a):
"""Returns Energy pertubation basis functions resulting from fractional changes to EOS params."""
fname = 'energy'
scale_a, paramkey_a = self.get_param_scale(
apply_expand_adj=self.expand_adj)
Eperturb_a = []
for paramname in paramkey_a:
iEperturb_a = self._calc_param_deriv(fname, paramname, V_a)
Eperturb_a.append(iEperturb_a)
Eperturb_a = np.array(Eperturb_a)
return Eperturb_a, scale_a, paramkey_a
#====================================================================
# Implementations
#====================================================================
class _GammaPowLaw(GammaCalc):
_path_opts=['S']
def __init__(self, eos_mod):
super(_GammaPowLaw, self).__init__(eos_mod)
pass
def _init_params(self):
"""Initialize list of calculator parameter names."""
V0 = 100
gamma0 = 1.0
q = 1.0
self._param_names = ['V0', 'gamma0', 'q']
self._param_units = ['ang^3', '1', '1']
self._param_defaults = [V0, gamma0, q]
self._param_scales = [V0, gamma0, q]
pass
def _calc_gamma(self, V_a):
V0, gamma0, q = self.eos_mod.get_param_values(
param_names=['V0','gamma0','q'])
gamma_a = gamma0 *(V_a/V0)**q
return gamma_a
def _calc_gamma_deriv(self, V_a):
q, = self.eos_mod.get_param_values(param_names=['q'])
gamma_a = self._calc_gamma(V_a)
gamma_deriv_a = q*gamma_a/V_a
return gamma_deriv_a
def _calc_temp(self, V_a, T0=None):
if T0 is None:
T0 = self.eos_mod.refstate.ref_temp()
# T0, = self.eos_mod.get_param_values(param_names=['T0'], overrides=[T0])
gamma0, q = self.eos_mod.get_param_values(
param_names=['gamma0','q'])
gamma_a = self._calc_gamma(V_a)
T_a = T0*np.exp(-(gamma_a - gamma0)/q)
return T_a
#====================================================================
class _GammaShiftPowLaw(GammaCalc):
"""
Shifted Power Law description of Grüneisen Parameter (Al’tshuler, 1987)
"""
_path_opts=['S']
def __init__(self, eos_mod):
super(_GammaShiftPowLaw, self).__init__(eos_mod)
pass
def _init_params(self):
"""Initialize list of calculator parameter names."""
V0 = 100
gamma0 = 1.5
gamma_inf = 2/3
beta = 1.4
T0 = 300
self._param_names = ['V0', 'gamma0', 'gamma_inf', 'beta', 'T0']
self._param_units = ['ang^3', '1', '1', '1', 'K']
self._param_defaults = [V0, gamma0, gamma_inf, beta, T0]
self._param_scales = [V0, gamma0, gamma_inf, beta, T0]
pass
def _calc_gamma(self, V_a):
V0, gamma0, gamma_inf, beta = self.eos_mod.get_param_values(
param_names=['V0','gamma0','gamma_inf','beta'])
gamma_a = gamma_inf + (gamma0-gamma_inf)*(V_a/V0)**beta
return gamma_a
def _calc_gamma_deriv(self, V_a):
gamma_inf, beta = self.eos_mod.get_param_values(
param_names=['gamma_inf','beta'])
gamma_a = self._calc_gamma(V_a)
gamma_deriv_a = beta/V_a*(gamma_a-gamma_inf)
return gamma_deriv_a
def _calc_temp(self, V_a, T0=None):
T0, = self.eos_mod.get_param_values(param_names=['T0'], overrides=[T0])
V0, gamma0, gamma_inf, beta = self.eos_mod.get_param_values(
param_names=['V0','gamma0','gamma_inf','beta'])
gamma_a = self._calc_gamma(V_a)
x = V_a/V0
T_a = T0*x**(-gamma_inf)*np.exp((gamma0-gamma_inf)/beta*(1-x**beta))
return T_a
#====================================================================
class _GammaFiniteStrain(GammaCalc):
_path_opts=['S']
def __init__(self, eos_mod):
super(_GammaFiniteStrain, self).__init__(eos_mod)
pass
def _init_params(self):
"""Initialize list of calculator parameter names."""
V0 = 100
gamma0 = 0.5
gammap0 = -2
self._param_names = ['V0', 'gamma0', 'gammap0']
self._param_units = ['ang^3', '1', '1']
self._param_defaults = [V0, gamma0, gammap0]
self._param_scales = [V0, gamma0, gammap0]
pass
def _calc_strain_coefs(self):
V0, gamma0, gammap0 = self.eos_mod.get_param_values(
param_names=['V0','gamma0','gammap0'])
a1 = 6*gamma0
a2 = -12*gamma0 +36*gamma0**2 -18*gammap0
return a1, a2
def _calc_fstrain(self, V_a, deriv=False):
V0, = self.eos_mod.get_param_values(param_names=['V0'])
x = V_a/V0
if deriv:
return -1/(3*V0)*x**(-5/3)
else:
return 1/2*(x**(-2/3)-1)
pass
def _calc_gamma(self, V_a):
a1, a2 = self._calc_strain_coefs()
fstr_a = self._calc_fstrain(V_a)
gamma_a = (2*fstr_a+1)*(a1+a2*fstr_a)/(6*(1+a1*fstr_a+0.5*a2*fstr_a**2))
return gamma_a
def _calc_gamma_deriv(self, V_a):
a1, a2 = self._calc_strain_coefs()
fstr_a = self._calc_fstrain(V_a)
fstr_deriv = self._calc_fstrain(V_a, deriv=True)
gamma_a = self._calc_gamma(V_a)
gamma_deriv_a = gamma_a*fstr_deriv*(
2/(2*fstr_a+1)+a2/(a1+a2*fstr_a)
-(a1+a2*fstr_a)/(1+a1*fstr_a+.5*a2*fstr_a**2))
return gamma_deriv_a
def _calc_temp(self, V_a, T0=None):
if T0 is None:
T0 = self.eos_mod.refstate.ref_temp()
a1, a2 = self._calc_strain_coefs()
fstr_a = self._calc_fstrain(V_a)
T_a = T0*np.sqrt(1 + a1*fstr_a + 0.5*a2*fstr_a**2)
return T_a
#====================================================================
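#====================================================================
# Hedged usage sketch (illustrative only, not part of the module):
#
#   eos = GammaEos(kind='GammaPowLaw')
#   V_a = np.linspace(60, 100, 5)   # volumes in ang^3
#   gamma_a = eos.gamma(V_a)        # gamma0*(V_a/V0)**q
#   T_a = eos.temp(V_a, T0=300)     # adiabat through T0
#
# This assumes the defaults from _init_params (V0=100, gamma0=1.0,
# q=1.0) are installed by the core.Eos machinery at construction time;
# otherwise set parameter values first via the eos_mod parameter API.
#====================================================================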
| aswolf/xmeos | xmeos/models/gamma.py | Python | mit | 11,700 |
import numpy
import pytest
import theano
class TestInputLayer:
@pytest.fixture
def layer(self):
from lasagne.layers.input import InputLayer
return InputLayer((3, 2))
def test_input_var(self, layer):
assert layer.input_var.ndim == 2
def test_get_output_shape(self, layer):
assert layer.get_output_shape() == (3, 2)
def test_get_output_without_arguments(self, layer):
assert layer.get_output() is layer.input_var
def test_get_output_input_is_variable(self, layer):
variable = theano.Variable("myvariable")
assert layer.get_output(variable) is variable
def test_get_output_input_is_array(self, layer):
input = [[1,2,3]]
output = layer.get_output(input)
assert numpy.all(output.eval() == input)
def test_get_output_input_is_a_mapping(self, layer):
input = {layer: theano.tensor.matrix()}
assert layer.get_output(input) is input[layer]
def test_input_var_name(self, layer):
assert layer.input_var.name == "input"
def test_named_layer_input_var_name(self):
from lasagne.layers.input import InputLayer
layer = InputLayer((3, 2), name="foo")
assert layer.input_var.name == "foo.input"
| diogo149/Lasagne | lasagne/tests/layers/test_input.py | Python | mit | 1,255 |
'''
Created on Nov 19, 2011
@author: scottporter
'''
class BaseSingleton(object):
_instance = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
        return cls._instance
| freneticmonkey/epsilonc | resources/scripts/core/basesingleton.py | Python | mit | 263 |
"""Device tracker for Synology SRM routers."""
from __future__ import annotations
import logging
import synology_srm
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA as DEVICE_TRACKER_PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
DEFAULT_USERNAME = "admin"
DEFAULT_PORT = 8001
DEFAULT_SSL = True
DEFAULT_VERIFY_SSL = False
PLATFORM_SCHEMA = DEVICE_TRACKER_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
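# Maps the router API's attribute names to the names exported by
# get_extra_attributes below; a value of None keeps the original key.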
ATTRIBUTE_ALIAS = {
"band": None,
"connection": None,
"current_rate": None,
"dev_type": None,
"hostname": None,
"ip6_addr": None,
"ip_addr": None,
"is_baned": "is_banned",
"is_beamforming_on": None,
"is_guest": None,
"is_high_qos": None,
"is_low_qos": None,
"is_manual_dev_type": None,
"is_manual_hostname": None,
"is_online": None,
"is_parental_controled": "is_parental_controlled",
"is_qos": None,
"is_wireless": None,
"mac": None,
"max_rate": None,
"mesh_node_id": None,
"rate_quality": None,
"signalstrength": "signal_strength",
"transferRXRate": "transfer_rx_rate",
"transferTXRate": "transfer_tx_rate",
}
def get_scanner(hass: HomeAssistant, config: ConfigType) -> DeviceScanner | None:
"""Validate the configuration and return Synology SRM scanner."""
scanner = SynologySrmDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class SynologySrmDeviceScanner(DeviceScanner):
"""This class scans for devices connected to a Synology SRM router."""
def __init__(self, config):
"""Initialize the scanner."""
self.client = synology_srm.Client(
host=config[CONF_HOST],
port=config[CONF_PORT],
username=config[CONF_USERNAME],
password=config[CONF_PASSWORD],
https=config[CONF_SSL],
)
if not config[CONF_VERIFY_SSL]:
self.client.http.disable_https_verify()
self.devices = []
self.success_init = self._update_info()
_LOGGER.info("Synology SRM scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device["mac"] for device in self.devices]
def get_extra_attributes(self, device) -> dict:
"""Get the extra attributes of a device."""
device = next(
(result for result in self.devices if result["mac"] == device), None
)
filtered_attributes: dict[str, str] = {}
if not device:
return filtered_attributes
for attribute, alias in ATTRIBUTE_ALIAS.items():
if (value := device.get(attribute)) is None:
continue
attr = alias or attribute
filtered_attributes[attr] = value
return filtered_attributes
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [
result["hostname"] for result in self.devices if result["mac"] == device
]
if filter_named:
return filter_named[0]
return None
def _update_info(self):
"""Check the router for connected devices."""
_LOGGER.debug("Scanning for connected devices")
try:
self.devices = self.client.core.get_network_nsm_device({"is_online": True})
except synology_srm.http.SynologyException as ex:
_LOGGER.error("Error with the Synology SRM: %s", ex)
return False
_LOGGER.debug("Found %d device(s) connected to the router", len(self.devices))
return True
| rohitranjan1991/home-assistant | homeassistant/components/synology_srm/device_tracker.py | Python | mit | 4,397 |
'''
utils.py: helper functions for DLP api
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from som.logger import bot
import os
import sys
def paginate_items(items, size=100):
    '''paginate_items will return a list of lists, each containing at most
    `size` items.
    '''
groups = []
for idx in range(0, len(items), size):
group = items[idx:idx+size]
groups.append(group)
return groups
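# Hedged example (illustrative): paginate_items([1, 2, 3, 4, 5], size=2)
# returns [[1, 2], [3, 4], [5]].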
def clean_text(text, findings):
'''clean_text will remove phi findings from a text object
:param text: the original text sent to the content.inspect DLP endpoint
:param findings: the full response for the text.
'''
if 'findings' in findings:
for finding in findings['findings']:
label = "**%s**" %finding['infoType']['name']
            # Not sure if this is the best strategy; we can start with it
text = text.replace(finding['quote'],label)
return text
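# Hedged example (illustrative): given
#   findings == {'findings': [{'infoType': {'name': 'PERSON_NAME'},
#                              'quote': 'John'}]}
# clean_text(text, findings) replaces each occurrence of 'John' in the
# text with '**PERSON_NAME**'.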
| radinformatics/som-tools | som/api/google/dlp/utils.py | Python | mit | 1,944 |
# -*- coding: utf-8 -*-
'''
Description:
    Extract features from English text.
Version:
python3
'''
from sklearn.feature_extraction.text import CountVectorizer
VECTORIZER = CountVectorizer(min_df=1)
# The line below (commented out) configures the feature-extraction parameters:
# an n-gram window of 1-2 words, whitespace-delimited tokens, and a minimum
# term frequency of 1.
# For details, see the API reference:
# http://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction
# VECTORIZER = CountVectorizer(ngram_range=(1,2), token_pattern=r'\b\w+\b', min_df=1)
CORPUS = [
'This is the first document.',
'This is the second second document.',
'And the third one.',
'Is this the first document?'
]
X = VECTORIZER.fit_transform(CORPUS)
FEATURE_NAMES = VECTORIZER.get_feature_names()
print(FEATURE_NAMES)
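# Expected feature names for this corpus (the standard scikit-learn example):
# ['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', 'this']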
| Jackson-Y/Machine-Learning | text/feature_extraction.py | Python | mit | 828 |
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="y", parent_name="bar", **kwargs):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop("anim", True),
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
role=kwargs.pop("role", "data"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/bar/_y.py | Python | mit | 480 |
# -*- coding: utf-8 -*-
from .record import (
Metadata,
Record,
)
__all__ = ['Parser']
class Parser:
def __init__(self, store):
self.store = store
def parse_record(self, metadata, line):
factors = line.split('|')
if len(factors) < 7:
return
        registry, cc, type_, start, value, date, status = factors[:7]
if type_ not in ('ipv4', 'ipv6'):
return
return Record(metadata, start, type_, value, cc)
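    # Example delegation-format line consumed by parse_record (illustration):
    #   apnic|JP|ipv4|1.0.16.0|4096|20110412|allocated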
def do(self, fp):
metadata = None
for line in fp:
line = line[:-1]
if line.startswith('#') or line.endswith('summary'):
continue
if metadata is None:
version, registry, serial, records,\
startdate, enddate, utcoffset = line.split('|')[:7]
metadata = Metadata(registry, version, serial)
continue
record = self.parse_record(metadata, line)
if record is None:
continue
self.store.persist(record)
| yosida95/ip2country | ip2country/parser.py | Python | mit | 1,079 |
class Solution(object):
def isPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
if not s:
return True
start = 0
end = len(s)-1
s = s.lower()
while start < end:
while start < end and not s[start].isalnum():
start += 1
while start < end and not s[end].isalnum():
end -= 1
if s[start] == s[end]:
start += 1
end -= 1
else:
return False
        return True
| tedye/leetcode | Python/leetcode.125.valid-palindrome.py | Python | mit | 594 |
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/work_title_v30_rc2.py | Python | mit | 4,930 |
import collections
import json
import unittest
import responses
from requests import HTTPError
from mock import patch
from batfish import Client
from batfish.__about__ import __version__
class TestClientAuthorize(unittest.TestCase):
def setUp(self):
with patch('batfish.client.read_token_from_conf',
return_value=None):
self.cli = Client()
@responses.activate
def test_authorize_error(self):
url = "https://api.digitalocean.com/v2/actions"
responses.add(responses.GET, url,
body='{"error": "something"}', status=500,
content_type="application/json")
with self.assertRaises(HTTPError):
self.cli.authorize("test_token")
@responses.activate
def test_authorize_unauthorized(self):
url = "https://api.digitalocean.com/v2/kura"
body = {'id': "unauthorized", 'message': "Unable to authenticate you."}
responses.add(responses.GET, url, body=json.dumps(body), status=401,
content_type="application/json")
self.cli.authorize("test_token")
self.assertEquals(responses.calls[0].response.status_code, 401)
@responses.activate
    def test_authorize_ok(self):
url = "https://api.digitalocean.com/v2/actions"
responses.add(responses.GET, url,
body='{"error": "something"}', status=200,
content_type="application/json")
auth = self.cli.authorize("test_token")
self.assertEquals(auth, "OK")
self.assertEquals(responses.calls[0].response.status_code, 200)
| kura/batfish | tests/test_client_authorize.py | Python | mit | 1,645 |
from .. import console, fields
from ..exceptions import ConsoleError
from . import mock
import pytest
console.raw_input = mock.raw_input
def test_prompt():
field=fields.Field("test_field", "test field", fields.Field.TYPE_TEXT_ONELINE, "this is a test field")
assert console.prompt(field)=="999"
def test_input_parser():
sys_args=['-f', 'myfile']
exts=["test"]
models=["test_model"]
assert console.input_parser(models, exts, sys_args)==["test_model","myfile","test"]
with pytest.raises(ConsoleError):
console.input_parser("", "", sys_args)
| gorbinphilip/PyRegistrar | pyregistrar/test/test_console.py | Python | mit | 577 |
import sys
import petsc4py
petsc4py.init(sys.argv)
from ecoli_in_pipe import head_tail
# import numpy as np
# from scipy.interpolate import interp1d
# from petsc4py import PETSc
# from ecoli_in_pipe import single_ecoli, ecoliInPipe, head_tail, ecoli_U
# from codeStore import ecoli_common
#
#
# def call_head_tial(uz_factor=1., wz_factor=1.):
# PETSc.Sys.Print('')
# PETSc.Sys.Print('################################################### uz_factor = %f, wz_factor = %f' %
# (uz_factor, wz_factor))
# t_head_U = head_U.copy()
# t_tail_U = tail_U.copy()
# t_head_U[2] = t_head_U[2] * uz_factor
# t_tail_U[2] = t_tail_U[2] * uz_factor
# # C1 = t_head_U[5] - t_tail_U[5]
# # C2 = t_head_U[5] / t_tail_U[5]
# # t_head_U[5] = wz_factor * C1 * C2 / (wz_factor * C2 - 1)
# # t_tail_U[5] = C1 / (wz_factor * C2 - 1)
# t_head_U[5] = wz_factor * t_head_U[5]
# t_kwargs = {'head_U': t_head_U,
# 'tail_U': t_tail_U, }
# total_force = head_tail.main_fun()
# return total_force
#
#
# OptDB = PETSc.Options()
# fileHandle = OptDB.getString('f', 'ecoliInPipe')
# OptDB.setValue('f', fileHandle)
# main_kwargs = {'fileHandle': fileHandle}
# # head_U, tail_U, ref_U = ecoli_common.ecoli_restart(**main_kwargs)
# # ecoli_common.ecoli_restart(**main_kwargs)
# head_U = np.array([0, 0, 1, 0, 0, 1])
# tail_U = np.array([0, 0, 1, 0, 0, 1])
# call_head_tial()
head_tail.main_fun()
| pcmagic/stokes_flow | ecoli_in_pipe/wrapper_head_tail.py | Python | mit | 1,450 |
# Python Code From Book
# This file consists of code snippets only
# It is not intended to be run as a script
raise SystemExit
####################################################################
# 3. Thinking in Binary
####################################################################
import magic
print magic.from_file("my_image.jpg")
# JPEG image data, Exif standard: [TIFF image data, big-endian,
# direntries=16, height=3264, bps=0, PhotometricIntepretation=RGB],
# baseline, precision 8, 2378x2379, frames 3
if magic.from_file("upload.jpg", mime=True) == "image/jpeg":
continue_uploading("upload.jpg")
else:
alert("Sorry! This file type is not allowed")
import imghdr
print imghdr.what("path/to/my/file.ext")
import binascii
def spoof_file(file, magic_number):
magic_number = binascii.unhexlify(magic_number)
with open(file, "r+b") as f:
old = f.read()
f.seek(0)
f.write(magic_number + old)
def to_ascii_bytes(string):
return " ".join(format(ord(char), '08b') for char in string)
string = "my ascii string"
"".join(hex(ord(char))[2:] for char in string)
# '6d7920617363696920737472696e67'
hex_string = "6d7920617363696920737472696e67"
hex_string.decode("hex")
# 'my ascii string'
"".join(chr(int(hex_string[i:i+2], 16)) for i in range(0, len(hex_string), 2))
# 'my ascii string'
# adapted from https://code.activestate.com/recipes/142812-hex-dumper/
def hexdump(string, length=8):
result = []
digits = 4 if isinstance(string, unicode) else 2
for i in xrange(0, len(string), length):
s = string[i:i + length]
hexa = "".join("{:0{}X}".format(ord(x), digits) for x in s)
text = "".join(x if 0x20 <= ord(x) < 0x7F else '.' for x in s)
result.append("{:04X} {:{}} {}".format(i, hexa, length * (digits + 1), text))
return '\n'.join(result)
with open("/path/to/my_file.ext", "r") as f:
print hexdump(f.read())
import struct
num = 0x103e4
struct.pack("I", 0x103e4)
# '\xe4\x03\x01\x00'
string = '\xe4\x03\x01\x00'
struct.unpack("i", string)
# (66532,)
bytes = '\x01\xc2'
struct.pack("<h", struct.unpack(">h", bytes)[0])
# '\xc2\x01'
import base64
base64.b64encode('encodings are fun...')
# 'ZW5jb2RpbmdzIGFyZSBmdW4uLi4='
base64.b64decode(_)
# 'encodings are fun...'
string = "hello\x00"
binary_string = ' '.join('{:08b}'.format(ord(char)) for char in string)
" ".join(binary_string[i:i+6] for i in range(0, len(binary_string), 6))
# '011010 000110 010101 101100 011011 000110 111100 000000'
bin_string = '011010 000110 010101 101100 011011 000110 111100 000000'
[int(b, 2) for b in bin_string.split()]
# [26, 6, 21, 44, 27, 6, 60, 0]
u'◑ \u2020'.encode('utf8')
# '\xe2\x97\x91 \xe2\x80\xa0'
'\xe2\x97\x91 \xe2\x80\xa0'.decode('utf8')
# u'\u25d1 \u2020'
unicode('\xe2\x97\x91 \xe2\x80\xa0', encoding='utf8')
# u'\u25d1 \u2020'
utf8_string = 'Åêíòü'
utf8_string
# '\xc3\x85\xc3\xaa\xc3\xad\xc3\xb2\xc3\xbc'
unicode_string = utf8_string.decode('utf8')
unicode_string
# u'\xc5\xea\xed\xf2\xfc'
unicode_string.encode('mac roman')
# '\x81\x90\x92\x98\x9f'
'Åêíòü'.decode('utf8').encode('ascii')
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-4: ordinal not in range(128)
file = """潍楪慢敫椠桴慧扲敬整瑸琠慨⁴獩琠敨爠獥汵⁴景琠硥⁴敢湩敤潣敤獵湩湡甠楮瑮湥敤档
牡捡整湥潣楤杮楷桴挠浯汰瑥汥⁹湵敲慬整湯獥景整牦浯愠搠晩敦敲瑮眠楲楴杮猠獹整‧⠊慔敫
牦浯攠楷楫数楤牯⥧"""
print file.decode('utf8').encode('utf16')
# ??Mojibake is the garbled text that is the result of text being decoded using an
# unintended character encoding with completely unrelated ones, often from a
# different writing system.' (Taken from en.wikipedia.org)
import ftfy
ftfy.fix_text(u"“Mojibake“ can be fixed.")
# u'"Mojibake" can be fixed.'
bin(0b1010 & 0b1111110111)
# '0b10'
bin(0b1010 | 0b0110)
# '0b1110'
bin(0b10111 | 0b01000)
# '0b11111'
bin(0b100 ^ 0b110)
# '0b10'
bin(-0b1010 >> 0b10)
# '-0b11'
x = 0b1111
y = 0b1010
bin(int("{:b}{:b}".format(x, y), 2))
# '0b11111010'
bin(x << 4 | y)
# '0b11111010'
####################################################################
# 4. Cryptography
####################################################################
import random
import string
r = random.SystemRandom()
# Get a random integer between 0 and 20
r.randint(0, 20)
# 5
# Get a random number between 0 and 1
r.random()
# 0.8282475835972263
# Generate a random 40-bit number
r.getrandbits(40)
# 595477188771L
# Choose a random item from a string or list
chars = string.printable
r.choice(chars)
# 'e'
# Randomize the order of a sequence
seq = ['a', 'b', 'c', 'd', 'e']
r.shuffle(seq)
print seq
# ['c','d', 'a', 'e', 'b']
"ALLIGATOR".encode('rot13')
# 'NYYVTNGBE'
"NYYVTNGBE".encode('rot13')
# 'ALLIGATOR'
plaintext = "A secret-ish message!"
"".join(chr((ord(c) + 20) % 256) for c in plaintext)
# 'U4\x87yw\x86y\x88A}\x87|4\x81y\x87\x87u{y5'
ciphertext = 'U4\x87yw\x86y\x88A}\x87|4\x81y\x87\x87u{y5'
"".join(chr((ord(c) - 20) % 256) for c in ciphertext)
# 'A secret-ish message!'
plaintext = 0b110100001101001
one_time_pad = 0b110000011100001
bin(plaintext ^ one_time_pad)
# '0b100010001000'
decrypted = 0b100010001000 ^ one_time_pad
format(decrypted, 'x').decode('hex')
# 'hi'
import os
import binascii
# ASCII-encoded plaintext
plaintext = "this is a secret message"
plaintext_bits = int(binascii.hexlify(plaintext), 16)
print "plaintext (ascii):", plaintext
print "plaintext (hex):", plaintext_bits
# Generate the one-time pad
onetime_pad = int(binascii.hexlify(os.urandom(len(plaintext))), 16)
print "one-time pad: (hex):", onetime_pad
# Encrypt plaintext using XOR operation with one-time pad
ciphertext_bits = plaintext_bits ^ onetime_pad
print "encrypted text (hex):", ciphertext_bits
# Decrypt using XOR operation with one-time pad
decrypted_text = ciphertext_bits ^ onetime_pad
decrypted_text = binascii.unhexlify(hex(decrypted_text)[2:-1])
print "decrypted text (ascii):", decrypted_text
import random
import binascii
p1 = "this is the part where you run away"
p2 = "from bad cryptography practices."
# pad plaintexts with spaces to ensure equal length
p1 = p1.ljust(len(p2))
p2 = p2.ljust(len(p1))
p1 = int(binascii.hexlify(p1), 16)
p2 = int(binascii.hexlify(p2), 16)
# get random one-time pad
otp = random.SystemRandom().getrandbits(p1.bit_length())
# encrypt
c1 = p1 ^ otp
c2 = p2 ^ otp # otp reuse...not good!
print "c1 ^ c2 == p1 ^ p2 ?", c1 ^ c2 == p1 ^ p2
print "c1 ^ c2 =", hex(c1 ^ c2)
# the crib
crib = " the "
crib = int(binascii.hexlify(crib), 16)
xored = c1 ^ c2
print "crib =", hex(crib)
cbl = crib.bit_length()
xbl = xored.bit_length()
print
mask = (2**(cbl + 1) - 1)
fill = len(str(xbl / 8))
# crib dragging
for s in range(0, xbl - cbl + 8, 8):
xor = (xored ^ (crib << s)) & (mask << s)
out = binascii.unhexlify(hex(xor)[2:-1])
print "{:>{}} {}".format(s/8, fill, out)
from cryptography.fernet import Fernet
key = Fernet.generate_key()
f = Fernet(key)
ciphertext = f.encrypt("this is my plaintext")
decrypted = f.decrypt(ciphertext)
print decrypted
# this is my plaintext
import os
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
pt = "my plaintext"
backend = default_backend()
key = os.urandom(32)
iv = os.urandom(16)
padder = padding.PKCS7(128).padder()
pt = padder.update(pt) + padder.finalize()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
encryptor = cipher.encryptor()
ct = encryptor.update(pt) + encryptor.finalize()
decryptor = cipher.decryptor()
out = decryptor.update(ct) + decryptor.finalize()
unpadder = padding.PKCS7(128).unpadder()
out = unpadder.update(out) + unpadder.finalize()
print out
import hashlib
hashlib.md5("hash me please").hexdigest()
# '760d92b6a6f974ae11904cd0a6fc2e90'
hashlib.sha1("hash me please").hexdigest()
# '1a58c9b3d138a45519518ee42e634600d1b52153'
import os
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt
from cryptography.hazmat.backends import default_backend
backend = default_backend()
salt = os.urandom(16)
kdf = Scrypt(salt=salt, length=64, n=2**14, r=8, p=1, backend=backend)
key = kdf.derive("your favorite password")
key
import hmac
import hashlib
secret_key = "my secret key"
ciphertext = "my ciphertext"
# generate HMAC
h = hmac.new(key=secret_key, msg=ciphertext, digestmod=hashlib.sha256)
print h.hexdigest()
# verify HMAC
hmac.compare_digest(h.hexdigest(), h.hexdigest())
p = 9576890767
q = 1299827
n = p * q
print n
# 12448301194997309
e = 65537
phi = (p - 1) * (q - 1)
phi % e != 0
# True
import sympy
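# igcdex(e, phi) returns (x, y, g) with e*x + phi*y = g; since e and phi
# are coprime, x is the modular inverse of e mod phi, i.e. the private
# exponent d satisfying (d * e) % phi == 1.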
d = sympy.numbers.igcdex(e, phi)[0]
print d
# 1409376745910033
m = 12345
c = pow(m, e, n)
print c
# 3599057382134015
pow(c, d, n)
# 12345
m = 0
while pow(m, e, n) != c:
m += 1
print m
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
public_key = private_key.public_key()
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption('your password here'))
public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
print public_pem
print private_pem
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import base64
with open("path/to/public_key.pem", "rb") as key_file:
public_key = serialization.load_pem_public_key(key_file.read(),
backend=default_backend())
message = "your secret message"
ciphertext = public_key.encrypt(message,
padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None))
b64_ciphertext = base64.urlsafe_b64encode(ciphertext)
print b64_ciphertext
plaintext = private_key.decrypt(ciphertext,
padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None))
print plaintext
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
signer = private_key.signer(padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256())
message = "A message of arbitrary length"
signer.update(message)
signature = signer.finalize()
public_key = private_key.public_key()
verifier = public_key.verifier(signature, padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), hashes.SHA256())
verifier.update(message)
verifier.verify()
####################################################################
# 5. Networking
####################################################################
import requests
r = requests.get('https://www.google.com/imghp')
r.content[:200]
# View status code
r.status_code
# 200
# View response header fields
r.headers
# {'Alt-Svc': 'quic=":443"; ma=2592000; v="36,35,34"',
# 'Cache-Control': 'private, max-age=0',
# 'Content-Encoding': 'gzip',
# 'Content-Type': 'text/html; charset=ISO-8859-1',
# 'Expires': '-1',
# 'P3P': 'CP="This is not a P3P policy! See https://www.google.com/support/accounts/answer/151657?hl=en for more info."',
# 'Server': 'gws',
# path=/; domain=.google.com; HttpOnly',
# 'Transfer-Encoding': 'chunked',
# 'X-Frame-Options': 'SAMEORIGIN',
# 'X-XSS-Protection': '1; mode=block'}
# Get content length in bytes
len(r.content)
# 10971
# Encoding
r.apparent_encoding
# 'ISO-8859-2'
# Time elapsed during request
r.elapsed
# datetime.timedelta(0, 0, 454447)
r.request.headers
# {'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate',
# 'Connection': 'keep-alive',
# 'User-Agent': 'python-requests/2.12.4'}
custom_headers = {"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"}
r = requests.get("https://www.google.com/imghp", headers=custom_headers)
r.request.headers
# {'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate',
# 'Connection': 'keep-alive',
# 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
import requests
import logging
import http.client as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
r = requests.get('https://www.google.com/')
# send: 'GET / HTTP/1.1\r\nHost: www.google.com\r\nConnection: keep-alive\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nUser-Agent: python-requests/2.12.4\r\n\r\n'
# reply: 'HTTP/1.1 200 OK\r\n'
# header: Expires: -1
# header: Cache-Control: private, max-age=0
# header: Content-Type: text/html; charset=ISO-8859-1
# header: P3P: CP="This is not a P3P policy! See https://www.google.com/support/accounts/answer/151657?hl=en for more info."
# header: Content-Encoding: gzip
# header: Server: gws
# header: X-XSS-Protection: 1; mode=block
# header: X-Frame-Options: SAMEORIGIN
import urlparse
simple_url = "http://www.example.com/path/to/my/page"
parsed = urlparse.urlparse(simple_url)
parsed.scheme
parsed.hostname
parsed.path
url_with_query = "http://www.example.com/?page=1&key=Anvn4mo24"
query = urlparse.urlparse(url_with_query).query
urlparse.parse_qs(query)
# {'key': ['Anvn4mo24'], 'page': ['1']}
import urllib
url = 'https://www.example.com/%5EA-url-with-%-and-%5E?page=page+with%20spaces'
urllib.unquote(url)
# 'https://www.example.com/^A-url-with-%-and-^?page=page+with spaces'
chars = '!@#$%^%$#)'
urllib.quote(chars)
# '%21%40%23%24%25%5E%25%24%23%29'
urllib.unquote_plus(url)
# 'https://www.example.com/^A-url-with-%-and-^?page=page with spaces'
urllib.quote_plus('one two')
'one+two'
import requests
from bs4 import BeautifulSoup
r = requests.get("http://www.google.com")
soup = BeautifulSoup(r.content, "lxml")
soup.find_all('p')
soup.find_all('a')
# [<a class="gb1" href="http://www.google.com/imghp?hl=en&tab=wi">Images</a>,
# <a class="gb1" href="http://maps.google.com/maps?hl=en&tab=wl">Maps</a>,
# <a class="gb1" href="https://play.google.com/?hl=en&tab=w8">Play</a>,
# <a class="gb1" href="http://www.youtube.com/?tab=w1">YouTube</a>,
# <a class="gb1" href="http://news.google.com/nwshp?hl=en&tab=wn">News</a>,
# …]
for link in soup.find_all('a'):
print link.text, link["href"]
# Images http://www.google.com/imghp?hl=en&tab=wi
# Maps http://maps.google.com/maps?hl=en&tab=wl
# Play https://play.google.com/?hl=en&tab=w8
# YouTube http://www.youtube.com/?tab=w1
import dryscrape
from bs4 import BeautifulSoup
session = dryscrape.Session()
session.visit("http://www.google.com")
r = session.body()
soup = BeautifulSoup(r, "lxml")
from selenium import webdriver
driver = webdriver.Chrome("/path/to/chromedriver")
driver.get("http://www.google.com")
html = driver.page_source
driver.save_screenshot("screenshot.png")
driver.quit()
import smtplib
server = smtplib.SMTP('localhost', port=1025)
server.set_debuglevel(True)
server.sendmail("me@localhost", "you@localhost", "This is an email message")
server.quit()
| 0ppen/introhacking | code_from_book.py | Python | mit | 16,192 |
from django.contrib.auth import get_user_model
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework import routers, serializers, viewsets, permissions
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.reverse import reverse
from .models import Comment
# from accounts.models import MyUser
User = get_user_model()
class CommentVideoUrlHyperlinkedIdentityField(serializers.HyperlinkedIdentityField):
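    # Builds the video detail URL for a comment from its related video,
    # supplying the cat_slug / vid_slug kwargs the named route expects.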
def get_url(self, obj,view_name,request,format):
kwargs = {
"cat_slug":obj.video.category.slug,
"vid_slug":obj.video.slug
}
# print(reverse(view_name,kwargs=kwargs))
return reverse(view_name,kwargs=kwargs,request=request,format=format)
class CommentUpdateSerializer(serializers.ModelSerializer):
user = serializers.CharField(source='user.username',read_only=True)
class Meta:
model = Comment
fields = [
'id',
'user',
'text'
]
class CommentCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = [
'text',
'user',
'video',
'parent'
]
class ChildCommentSerializer(serializers.HyperlinkedModelSerializer):
# user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
user = serializers.CharField(source='user.username',read_only=True)
class Meta:
model = Comment
fields = [
'id',
"user",
'text'
]
class CommentSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField("comment_detail_api",lookup_field="pk")
# user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
video = CommentVideoUrlHyperlinkedIdentityField("video_detail_api")
user = serializers.CharField(source='user.username',read_only=True)
children = serializers.SerializerMethodField(read_only=True)
def get_children(self,instance):
# queryset = instance.get_children()
        queryset = Comment.objects.filter(parent__pk=instance.pk)
serializer = ChildCommentSerializer(queryset,context={"request":instance}, many=True)
return serializer.data
class Meta:
model = Comment
fields = [
"url",
'id',
"children",
# "parent",
"user",
'video',
'text'
]
class CommentViewSet(viewsets.ModelViewSet):
authentication_classes = [SessionAuthentication, BasicAuthentication, JSONWebTokenAuthentication]
permission_classes = [permissions.IsAuthenticated,]
queryset = Comment.objects.all()
serializer_class = CommentSerializer
| climberwb/video-api | src/comments/serializers.py | Python | mit | 2,962 |
<<<<<<< HEAD
<<<<<<< HEAD
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp852',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
'\xac' # 0x00aa -> NOT SIGN
'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
'\xb4' # 0x00ef -> ACUTE ACCENT
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
'\u02db' # 0x00f2 -> OGONEK
'\u02c7' # 0x00f3 -> CARON
'\u02d8' # 0x00f4 -> BREVE
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\xb8' # 0x00f7 -> CEDILLA
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u02d9' # 0x00fa -> DOT ABOVE
'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
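### Usage sketch (editor's addition): a minimal, hedged demo of the two
### module-level tables above, exercised through the real
### codecs.charmap_encode/charmap_decode APIs that the Codec class wraps.
### Guarded so it only runs when the module is executed as a script.
if __name__ == '__main__':
    import codecs

    sample = 'Žluťoučký kůň'  # every character here has a CP852 slot
    raw, _ = codecs.charmap_encode(sample, 'strict', encoding_map)
    text, _ = codecs.charmap_decode(raw, 'strict', decoding_table)
    assert text == sample  # round trip through encoding_map/decoding_table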
'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
'\xac' # 0x00aa -> NOT SIGN
'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
'\xb4' # 0x00ef -> ACUTE ACCENT
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
'\u02db' # 0x00f2 -> OGONEK
'\u02c7' # 0x00f3 -> CARON
'\u02d8' # 0x00f4 -> BREVE
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\xb8' # 0x00f7 -> CEDILLA
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u02d9' # 0x00fa -> DOT ABOVE
'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| ArcherSys/ArcherSys | Lib/encodings/cp852.py | Python | mit | 105,146 |
""" Setup file """
import os
from setuptools import find_packages, setup
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, "README.rst")) as f:
README = f.read()
with open(os.path.join(HERE, "CHANGES.rst")) as f:
CHANGES = f.read()
with open(os.path.join(HERE, "requirements_test.txt")) as f:
    REQUIREMENTS_TEST = f.readlines()
REQUIREMENTS = [
"botocore>=0.89.0",
]
if __name__ == "__main__":
setup(
name="dynamo3",
version="1.0.0",
description="Python 3 compatible library for DynamoDB",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
author="Steven Arcangeli",
author_email="[email protected]",
url="http://github.com/stevearc/dynamo3",
keywords="aws dynamo dynamodb",
include_package_data=True,
packages=find_packages(exclude=("tests",)),
license="MIT",
entry_points={
"console_scripts": [
"dynamodb-local = dynamo3.testing:run_dynamo_local",
],
"nose.plugins": [
"dynamolocal=dynamo3.testing:DynamoLocalPlugin",
],
},
python_requires=">=3.6",
install_requires=REQUIREMENTS,
tests_require=REQUIREMENTS + REQUIREMENTS_TEST,
)
| stevearc/dynamo3 | setup.py | Python | mit | 1,819 |
#!/usr/bin/env python
import Config
import Database
import atexit, os, sys, time
from flask import Flask
from concurrent.futures import ThreadPoolExecutor
from classes import CRONTask
# Generate a thread pool
executor = ThreadPoolExecutor(5)
app = Flask( __name__ )
@app.route( "/" )
def index( ) :
return "View Generator Service is Active!"
@app.route( "/View" )
def view( ) :
executor.submit(runTask)
return ""
def runTask( ) :
cron = CRONTask.CRONTask( )
cron.run( )
cron.killPID( )
sys.exit(0)
if __name__ == '__main__' :
    app.run( debug=True, port=Config.PORT, host=Config.HOST )
| BioGRID/ORCA | operations/ViewGenerator/ViewGeneratorService.py | Python | mit | 594 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-23 15:47
from __future__ import unicode_literals
from django.db import migrations, models
import oktansite.models
class Migration(migrations.Migration):
dependencies = [
('oktansite', '0004_news_attachment'),
]
operations = [
migrations.AddField(
model_name='news',
name='image',
field=models.ImageField(null=True, upload_to=oktansite.models.get_upload_path_news_attachment),
),
]
| aliakbr/oktan | oktansite/migrations/0005_news_image.py | Python | mit | 526 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Models.FeatureProcessing import *
from keras.models import Sequential
from keras.layers import Activation, Dense, LSTM
from keras.optimizers import Adam, SGD
import numpy as np
import abc
from ClassificationModule import ClassificationModule
class descriptionreponamelstm(ClassificationModule):
"""A basic lstm neural network"""
def __init__(self, num_hidden_layers=3):
        ClassificationModule.__init__(self, "Description and reponame LSTM", "An LSTM reading the description and reponame character by character")
hidden_size = 300
self.maxlen = 300
# Set output_size
self.output_size = 7 # Hardcoded for 7 classes
model = Sequential()
# Maximum of self.maxlen charcters allowed, each in one-hot-encoded array
model.add(LSTM(hidden_size, input_shape=(self.maxlen, getLstmCharLength())))
for _ in range(num_hidden_layers):
model.add(Dense(hidden_size))
model.add(Dense(self.output_size))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=SGD(),
metrics=['accuracy'])
self.model = model
print "\t-", self.name
def resetAllTraining(self):
"""Reset classification module to status before training"""
resetWeights(self.model)
def trainOnSample(self, sample, nb_epoch=1, shuffle=True, verbose=True):
"""Trainiere (inkrementell) mit Sample. Evtl zusätzlich mit best. Menge alter Daten, damit overfitten auf neue Daten verhindert wird."""
readme_vec = self.formatInputData(sample)
label_index = getLabelIndex(sample)
label_one_hot = np.expand_dims(oneHot(label_index), axis=0) # [1, 0, 0, ..] -> [[1, 0, 0, ..]] Necessary for keras
self.model.fit(readme_vec, label_one_hot, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose)
def train(self, samples, nb_epoch=200, shuffle=True, verbose=True):
"""Trainiere mit Liste von Daten. Evtl weitere Paramter nötig (nb_epoch, learning_rate, ...)"""
train_samples = []
train_lables = []
for sample in samples:
formatted_sample = self.formatInputData(sample)[0].tolist()
train_samples.append(formatted_sample)
train_lables.append(oneHot(getLabelIndex(sample)))
train_lables = np.asarray(train_lables)
train_result = self.model.fit(train_samples, train_lables, nb_epoch=nb_epoch, shuffle=shuffle, verbose=verbose, class_weight=getClassWeights())
self.isTrained = True
return train_result
def predictLabel(self, sample):
"""Gibt zurück, wie der Klassifikator ein gegebenes Sample klassifizieren würde"""
if not self.isTrained:
return 0
sample = self.formatInputData(sample)
return np.argmax(self.model.predict(sample))
def predictLabelAndProbability(self, sample):
"""Return the probability the module assignes each label"""
if not self.isTrained:
return [0, 0, 0, 0, 0, 0, 0, 0]
sample = self.formatInputData(sample)
prediction = self.model.predict(sample)[0]
return [np.argmax(prediction)] + list(prediction) # [0] So 1-D array is returned
def formatInputData(self, sample):
"""Extract description and transform to vector"""
sd = getDescription(sample)
sd += getName(sample)
# Returns numpy array which contains 1 array with features
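        # (lstmEncode presumably yields a (maxlen, charset-size) one-hot
        # matrix; expand_dims then wraps it as a batch of one for Keras.)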
return np.expand_dims(lstmEncode(sd, maxlen=self.maxlen), axis=0)
| Ichaelus/Github-Classifier | Application/Models/ClassificationModules/descriptionreponamelstm.py | Python | mit | 3,641 |
__author__ = 'sekely'
'''
we use variables almost everywhere in the code.
variables are used to store results, calculations and much more.
think of it as the famous "x" from high school:
x = 5, right?
the only thing is that in Python "x" can store anything
'''
# try this code:
x = 5
y = x + 3
print(y)
# what about this? will it work?
x = 'hello'
y = ' '
z = 'world!'
w = x + y + z
print(w)
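# (yes, it works: '+' concatenates strings, so w ends up as 'hello world!')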
| idosekely/python-lessons | lesson_1/variables.py | Python | mit | 403 |
# img
# trigger = attributes[12]
# http://ws-tcg.com/en/cardlist
# edit
import os
import requests
import sqlite3
def get_card(browser):
attributes = browser.find_elements_by_xpath('//table[@class="status"]/tbody/tr/td')
image = attributes[0].find_element_by_xpath('./img').get_attribute('src')
if attributes[1].find_element_by_xpath('./span[@class="kana"]').text:
card_name = attributes[1].find_element_by_xpath('./span[@class="kana"]').text
else:
card_name = None
card_no = attributes[2].text if attributes[2].text else None
rarity = attributes[3].text if attributes[3].text else None
expansion = attributes[4].text if attributes[4].text else None
if attributes[5].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/w.gif":
side = "Weiß"
elif attributes[5].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/s.gif":
side = "Schwarz"
else:
side = None
card_type = attributes[6].text if attributes[6].text else None
if attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/yellow.gif":
color = "Yellow"
elif attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/green.gif":
color = "Green"
elif attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/red.gif":
color = "Red"
elif attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/blue.gif":
color = "Blue"
else:
color = None
level = attributes[8].text if attributes[8].text else None
cost = attributes[9].text if attributes[9].text else None
power = attributes[10].text if attributes[10].text else None
soul = len(attributes[11].find_elements_by_xpath('./img[contains(@src, "http://ws-tcg.com/en/cardlist/partimages/soul.gif")]'))
special_attribute = attributes[13].text if attributes[13].text else None
text = attributes[14].text if attributes[14].text else None
flavor_text = attributes[15].text if attributes[15].text else None
if not os.path.exists("images"):
os.makedirs("images")
if not os.path.exists("images/" + card_no.split("/")[0]):
os.makedirs("images/" + card_no.split("/")[0])
r = requests.get(image, stream=True)
if r.status_code == 200:
with open("images/" + card_no + ".jpg", 'wb') as f:
for chunk in r:
f.write(chunk)
card = (card_name, card_no, rarity, expansion, side, card_type, color, level, cost, power, soul,
special_attribute, text, flavor_text)
connection = sqlite3.connect('cards.sqlite3')
cursor = connection.cursor()
cursor.execute('INSERT INTO cards (name, no, rarity, expansion, side, type, color, level, cost, power, soul,'
'special_attribute, text, flavor_text) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?)', card)
connection.commit()
connection.close()
| electronicdaisy/WeissSchwarzTCGDatabase | card.py | Python | mit | 3,176 |
#
# NineMSN CatchUp TV Video API Library
#
# This code is forked from Network Ten CatchUp TV Video API Library
# Copyright (c) 2013 Adam Malcontenti-Wilson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from brightcove.core import APIObject, Field, DateTimeField, ListField, EnumField
from brightcove.objects import ItemCollection, enum
ChannelNameEnum = enum('ten', 'eleven', 'one')
PlaylistTypeEnum = enum('full_episodes', 'web_extras', 'news', 'season', 'week', 'category', 'special', 'preview')
MediaDeliveryEnum = enum('default', 'http', 'http_ios')
class EnumNumField(Field):
def __init__(self, enum_cls, help=None):
self.help = help
self.enum_cls = enum_cls
def to_python(self, value):
for i, field in enumerate(self.enum_cls._fields):
if i == value:
return field
raise Exception('Invalid Enum: %s' % value)
def from_python(self, value):
return self.enum_cls._fields[value]
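    # e.g. with MediaDeliveryEnum ('default', 'http', 'http_ios'), a raw API
    # value of 1 is decoded to 'http' by to_python above.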
class Playlist(APIObject):
_fields = ['name', 'type', 'season', 'week', 'query']
type = EnumField(PlaylistTypeEnum)
def __repr__(self):
return '<Playlist name=\'{0}\'>'.format(self.name)
class Show(APIObject):
_fields = ['showName', 'channelName', 'videoLink', 'mobileLink', 'logo', 'fanart', 'playlists']
channelName = EnumField(ChannelNameEnum)
playlists = ListField(Playlist)
def __repr__(self):
return '<Show name=\'{0}\'>'.format(self.showName)
class AMFRendition(APIObject):
_fields = ['defaultURL', 'audioOnly', 'mediaDeliveryType', 'encodingRate',
'frameHeight', 'frameWidth', 'size',
'videoCodec', 'videoContainer']
mediaDeliveryType = EnumNumField(MediaDeliveryEnum)
def __repr__(self):
return '<Rendition bitrate=\'{0}\' type=\'{1}\' frameSize=\'{2}x{3}\'>'.format(self.encodingRate, self.mediaDeliveryType, self.frameWidth, self.frameHeight)
class ShowItemCollection(ItemCollection):
_item_class = Show
items = ListField(Show)
class PlaylistItemCollection(ItemCollection):
_item_class = Playlist
items = ListField(Playlist)
class MediaRenditionItemCollection(ItemCollection):
_item_class = AMFRendition
items = ListField(AMFRendition)
| predakanga/plugin.video.catchuptv.au.ninemsn | resources/lib/ninemsnvideo/objects.py | Python | mit | 3,248 |
from Bio import SeqIO
def get_proteins_for_db(fastafn, fastadelim, genefield):
"""Runs through fasta file and returns proteins accession nrs, sequences
and evidence levels for storage in lookup DB. Duplicate accessions in
fasta are accepted and removed by keeping only the last one.
"""
records = {acc: (rec, get_record_type(rec)) for acc, rec in
SeqIO.index(fastafn, 'fasta').items()}
proteins = ((x,) for x in records.keys())
sequences = ((acc, str(rec.seq)) for acc, (rec, rtype) in records.items())
desc = ((acc, get_description(rec, rtype)) for acc, (rec, rtype) in records.items() if rtype)
evid = ((acc, get_uniprot_evidence_level(rec, rtype)) for acc, (rec, rtype) in
records.items())
ensgs = [(get_ensg(rec), acc) for acc, (rec, rtype) in records.items()
if rtype == 'ensembl']
def sym_out():
symbols = ((get_symbol(rec, rtype, fastadelim, genefield), acc) for
acc, (rec, rtype) in records.items() if rtype)
othergene = ((get_other_gene(rec, fastadelim, genefield), acc) for acc, (rec, rtype) in records.items()
if not rtype and fastadelim and fastadelim in rec.description)
yield from symbols
yield from othergene
return proteins, sequences, desc, evid, ensgs, [x for x in sym_out()]
def parse_fasta(fn):
with open(fn) as fp:
for record in SeqIO.parse(fp, 'fasta'):
yield record
def get_record_type(record):
dmod = get_decoy_mod_string(record.id)
test_name = record.id
if dmod is not None:
test_name = record.id.replace(dmod, '')
if test_name.split('|')[0] in ['sp', 'tr']:
return 'swiss'
elif test_name[:3] == 'ENS':
return 'ensembl'
else:
return False
def get_decoy_mod_string(protein):
mods = ['tryp_reverse', 'reverse', 'decoy', 'random', 'shuffle']
for mod in mods:
if mod in protein:
if protein.endswith('_{}'.format(mod)):
return '_{}'.format(mod)
elif protein.endswith('{}'.format(mod)):
return mod
elif protein.startswith('{}_'.format(mod)):
return '{}_'.format(mod)
elif protein.startswith('{}'.format(mod)):
return mod
def get_description(record, rectype):
if rectype == 'ensembl':
desc_spl = [x.split(':') for x in record.description.split()]
try:
descix = [ix for ix, x in enumerate(desc_spl) if x[0] == 'description'][0]
except IndexError:
return 'NA'
desc = ' '.join([':'.join(x) for x in desc_spl[descix:]])[12:]
return desc
elif rectype == 'swiss':
desc = []
for part in record.description.split()[1:]:
if len(part.split('=')) > 1:
break
desc.append(part)
return ' '.join(desc)
def get_other_gene(record, fastadelim, genefield):
return record.description.split(fastadelim)[genefield]
def get_genes_pickfdr(fastafn, outputtype, fastadelim, genefield):
"""Called by protein FDR module for both ENSG and e.g. Uniprot"""
for rec in parse_fasta(fastafn):
rtype = get_record_type(rec)
if rtype == 'ensembl' and outputtype == 'ensg':
yield get_ensg(rec)
elif outputtype == 'genename':
yield get_symbol(rec, rtype, fastadelim, genefield)
def get_ensg(record):
fields = [x.split(':') for x in record.description.split()]
try:
return [x[1] for x in fields if x[0] == 'gene' and len(x) == 2][0]
except IndexError:
raise RuntimeError('ENSEMBL detected but cannot find gene ENSG in fasta')
def get_symbol(record, rectype, fastadelim, genefield):
if rectype == 'ensembl':
fields = [x.split(':') for x in record.description.split()]
sym = [x[1] for x in fields if x[0] == 'gene_symbol' and len(x) == 2]
elif rectype == 'swiss':
fields = [x.split('=') for x in record.description.split()]
sym = [x[1] for x in fields if x[0] == 'GN' and len(x) == 2]
elif fastadelim and fastadelim in record.description and genefield:
return record.description.split(fastadelim)[genefield]
else:
return 'NA'
try:
return sym[0]
except IndexError:
return 'NA'
def get_uniprot_evidence_level(record, rtype):
"""Returns uniprot protein existence evidence level for a fasta header.
Evidence levels are 1-5, but we return 5 - x since sorting still demands
that higher is better."""
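    # e.g. a header carrying 'PE=1' (evidence at protein level, the strongest
    # class) scores 5 - 1 = 4, the highest value this function returns.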
if rtype != 'swiss':
return -1
for item in record.description.split():
item = item.split('=')
try:
if item[0] == 'PE' and len(item) == 2:
return 5 - int(item[1])
except IndexError:
continue
return -1
| glormph/msstitch | src/app/readers/fasta.py | Python | mit | 4,853 |
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Test BLEUScore metric against reference
'''
from neon.transforms.cost import BLEUScore
def test_bleuscore():
# dataset with two sentences
sentences = ["a quick brown fox jumped",
"the rain in spain falls mainly on the plains"]
references = [["a fast brown fox jumped",
"a quick brown fox vaulted",
"a rapid fox of brown color jumped",
"the dog is running on the grass"],
["the precipitation in spain falls on the plains",
"spanish rain falls for the most part on the plains",
"the rain in spain falls in the plains most of the time",
"it is raining today"]]
# reference scores for the given set of reference sentences
bleu_score_references = [92.9, 88.0, 81.5, 67.1] # bleu1, bleu2, bleu3, bleu4
# compute scores
bleu_metric = BLEUScore()
bleu_metric(sentences, references)
# check against references
for score, reference in zip(bleu_metric.bleu_n, bleu_score_references):
assert round(score, 1) == reference
if __name__ == '__main__':
test_bleuscore()
| matthijsvk/multimodalSR | code/Experiments/neon-master/tests/test_bleuscore.py | Python | mit | 1,914 |
#-*- coding: utf-8 -*-
from .grant import Grant
from ..endpoint import AuthorizationEndpoint
class ImplicitGrant(Grant):
"""
The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI --->| |
| User- | | Authorization |
| Agent -|----(B)-- User authenticates -->| Server |
| | | |
| |<---(C)--- Redirection URI ----<| |
| | with Access Token +---------------+
| | in Fragment
| | +---------------+
| |----(D)--- Redirection URI ---->| Web-Hosted |
| | without Fragment | Client |
| | | Resource |
| (F) |<---(E)------- Script ---------<| |
| | +---------------+
+-|--------+
| |
(A) (G) Access Token
| |
^ v
+---------+
| |
| Client |
| |
+---------+
Note: The lines illustrating steps (A) and (B) are broken into two
parts as they pass through the user-agent.
Figure 4: Implicit Grant Flow
"""
def get_redirection_uri(self, expires_in):
self._authorization_endpoint = AuthorizationEndpoint(self._server, self._request, self._client)
return self._authorization_endpoint.implicit(expires_in)
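# Illustrative use; the Grant base-class constructor signature is assumed here,
# not shown in this file:
#
#     grant = ImplicitGrant(server, request, client)
#     redirect_uri = grant.get_redirection_uri(expires_in=3600)
#     # -> redirection URI carrying the access token in its fragment (step C)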
| uptown/django-town | django_town/oauth2/grant/implicitgrant.py | Python | mit | 2,061 |
#!/usr/bin/env python3
from framework import do_exit, get_globals, main
def do_work():
global g_test_import
global globals1
print("do_work")
globals1 = get_globals()
g_test_import = globals1["g_test_import"]
print("do_work: g_test_import = %s" % str(g_test_import))
main(do_work)
| jtraver/dev | python3/globals/test1.py | Python | mit | 308 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Execute.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test the Execute() function for executing actions directly.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('my_copy.py', """\
import sys
open(sys.argv[2], 'wb').write(open(sys.argv[1], 'rb').read())
try:
exitval = int(sys.argv[3])
except IndexError:
exitval = 0
sys.exit(exitval)
""")
test.write('SConstruct', """\
Execute(r'%(_python_)s my_copy.py a.in a.out')
Execute(Action(r'%(_python_)s my_copy.py b.in b.out'))
env = Environment(COPY = 'my_copy.py')
env.Execute(r'%(_python_)s my_copy.py c.in c.out')
env.Execute(Action(r'%(_python_)s my_copy.py d.in d.out'))
v = env.Execute(r'%(_python_)s $COPY e.in e.out')
assert v == 0, v
v = env.Execute(Action(r'%(_python_)s $COPY f.in f.out'))
assert v == 0, v
v = env.Execute(r'%(_python_)s $COPY g.in g.out 1')
assert v == 1, v
v = env.Execute(Action(r'%(_python_)s $COPY h.in h.out 2'))
assert v == 2, v
import shutil
Execute(lambda target, source, env: shutil.copy('i.in', 'i.out'))
Execute(Action(lambda target, source, env: shutil.copy('j.in', 'j.out')))
env.Execute(lambda target, source, env: shutil.copy('k.in', 'k.out'))
env.Execute(Action(lambda target, source, env: shutil.copy('l.in', 'l.out')))
Execute(Copy('m.out', 'm.in'))
Execute(Copy('nonexistent.out', 'nonexistent.in'))
""" % locals())
test.write('a.in', "a.in\n")
test.write('b.in', "b.in\n")
test.write('c.in', "c.in\n")
test.write('d.in', "d.in\n")
test.write('e.in', "e.in\n")
test.write('f.in', "f.in\n")
test.write('g.in', "g.in\n")
test.write('h.in', "h.in\n")
test.write('i.in', "i.in\n")
test.write('j.in', "j.in\n")
test.write('k.in', "k.in\n")
test.write('l.in', "l.in\n")
test.write('m.in', "m.in\n")
import sys
if sys.platform == 'win32':
expect = r"""scons: \*\*\* Error 1
scons: \*\*\* Error 2
scons: \*\*\* nonexistent.in/\*\.\*: (The system cannot find the path specified|Das System kann den angegebenen Pfad nicht finden)"""
else:
expect = r"""scons: \*\*\* Error 1
scons: \*\*\* Error 2
scons: \*\*\* nonexistent\.in: No such file or directory"""
test.run(arguments = '.', stdout = None, stderr = None)
test.must_contain_all_lines(test.stderr(), expect.splitlines(), find=TestSCons.search_re)
test.must_match('a.out', "a.in\n")
test.must_match('b.out', "b.in\n")
test.must_match('c.out', "c.in\n")
test.must_match('d.out', "d.in\n")
test.must_match('e.out', "e.in\n")
test.must_match('f.out', "f.in\n")
test.must_match('g.out', "g.in\n")
test.must_match('h.out', "h.in\n")
test.must_match('i.out', "i.in\n")
test.must_match('j.out', "j.in\n")
test.must_match('k.out', "k.in\n")
test.must_match('l.out', "l.in\n")
test.must_match('m.out', "m.in\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | test/Execute.py | Python | mit | 4,013 |
from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
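        # e.g. with case_sensitive=False this builds
        # Q(username__iexact=username) | Q(email__iexact=username)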
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
| xsunfeng/cir | password_reset/forms.py | Python | mit | 4,276 |
from bioscrape.inference import DeterministicLikelihood as DLL
from bioscrape.inference import StochasticTrajectoriesLikelihood as STLL
from bioscrape.inference import StochasticTrajectories
from bioscrape.inference import BulkData
import warnings
import numpy as np
class PIDInterface():
'''
PID Interface : Parameter identification interface.
Super class to create parameter identification (PID) interfaces. Two PID interfaces currently implemented:
Deterministic and Stochastic inference using time-series data.
To add a new PIDInterface - simply add a new subclass of this parent class with your desired
log-likelihood functions. You can even have your own check_prior function in that class if you do not
prefer to use the built in priors with this package.
'''
def __init__(self, params_to_estimate, M, prior):
'''
Parent class for all PID interfaces.
Arguments:
* `params_to_estimate` : List of parameter names to be estimated
* `M` : The bioscrape Model object to use for inference
* `prior` : A dictionary specifying prior distribution.
Two built-in prior functions are `uniform_prior` and `gaussian_prior`.
Each prior has its own syntax for accepting the distribution parameters in the dictionary.
New priors may be added. The suggested format for prior dictionaries:
prior_dict = {'parameter_name': ['prior_name', prior_distribution_parameters]}
For built-in uniform prior, use {'parameter_name':['uniform', lower_bound, upper_bound]}
For built-in gaussian prior, use {'parameter_name':['gaussian', mean, standard_deviation, probability threshold]}
New PID interfaces can be added by creating child classes of PIDInterface class as shown for
Built-in PID interfaces : `StochasticInference` and `DeterministicInference`
'''
self.params_to_estimate = params_to_estimate
self.M = M
self.prior = prior
return
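    # Illustrative prior dictionary combining the built-in types described
    # above (the parameter names here are hypothetical placeholders):
    #
    #     prior = {'k_tx':  ['uniform', 0.0, 100.0],
    #              'k_deg': ['gaussian', 1.0, 0.2],
    #              'K_m':   ['log-uniform', 1e-3, 1e3]}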
def check_prior(self, params_dict):
'''
To add new prior functions: simply add a new function similar to ones that exist and then
call it here.
'''
lp = 0.0
for key,value in params_dict.items():
if 'positive' in self.prior[key] and value < 0:
return np.inf
prior_type = self.prior[key][0]
if prior_type == 'uniform':
lp += self.uniform_prior(key, value)
elif prior_type == 'gaussian':
lp += self.gaussian_prior(key, value)
elif prior_type == 'exponential':
lp += self.exponential_prior(key, value)
elif prior_type == 'gamma':
lp += self.gamma_prior(key, value)
elif prior_type == 'log-uniform':
lp += self.log_uniform_prior(key, value)
elif prior_type == 'log-gaussian':
lp += self.log_gaussian_prior(key, value)
elif prior_type == 'beta':
lp += self.beta_prior(key, value)
elif prior_type == 'custom':
# The last element in the prior dictionary must be a callable function
                # The callable function should have the following signature:
                # Arguments: param_name (str), param_value (float)
                # Returns: log prior probability (float or numpy inf)
                custom_function = self.prior[key][-1]
                lp += custom_function(key, value)
else:
raise ValueError('Prior type undefined.')
return lp
def uniform_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
        Returns np.inf if the param_value is outside the prior range, otherwise the log of the uniform density.
param_name is used to look for the parameter in the prior dictionary.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
lower_bound = prior_dict[param_name][1]
upper_bound = prior_dict[param_name][2]
if param_value > upper_bound or param_value < lower_bound:
return np.inf
else:
return np.log( 1/(upper_bound - lower_bound) )
def gaussian_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.Inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
mu = prior_dict[param_name][1]
sigma = prior_dict[param_name][2]
if sigma < 0:
raise ValueError('The standard deviation must be positive.')
# Using probability density function for normal distribution
# Using scipy.stats.norm has overhead that affects speed up to 2x
prob = 1/(np.sqrt(2*np.pi) * sigma) * np.exp(-0.5*(param_value - mu)**2/sigma**2)
if prob < 0:
warnings.warn('Probability less than 0 while checking Gaussian prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def exponential_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
lambda_p = prior_dict[param_name][1]
prob = lambda_p * np.exp(-lambda_p * param_value)
if prob < 0:
warnings.warn('Probability less than 0 while checking Exponential prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def gamma_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
alpha = prior_dict[param_name][1]
beta = prior_dict[param_name][2]
from scipy.special import gamma
prob = (beta**alpha)/gamma(alpha) * param_value**(alpha - 1) * np.exp(-1 * beta*param_value)
if prob < 0:
            warnings.warn('Probability less than 0 while checking Gamma prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def beta_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
alpha = prior_dict[param_name][1]
beta = prior_dict[param_name][2]
        from scipy.special import beta as beta_func
prob = (param_value**(alpha-1) * (1 - param_value)**(beta - 1) )/beta_func(alpha, beta)
if prob < 0:
            warnings.warn('Probability less than 0 while checking Beta prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def log_uniform_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
lower_bound = prior_dict[param_name][1]
upper_bound = prior_dict[param_name][2]
if lower_bound < 0 or upper_bound < 0:
raise ValueError('Upper and lower bounds for log-uniform prior must be positive.')
if param_value > upper_bound or param_value < lower_bound:
return np.inf
prob = 1/(param_value* (np.log(upper_bound) - np.log(lower_bound)))
if prob < 0:
warnings.warn('Probability less than 0 while checking Log-Uniform prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def log_gaussian_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
mu = prior_dict[param_name][1]
sigma = prior_dict[param_name][2]
if sigma < 0:
raise ValueError('The standard deviation must be positive.')
# Using probability density function for log-normal distribution
prob = 1/(param_value * np.sqrt(2*np.pi) * sigma) * np.exp((-0.5 * (np.log(param_value) - mu)**2)/sigma**2)
if prob < 0:
warnings.warn('Probability less than 0 while checking log-normal prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
# Add a new class similar to this to create new interfaces.
class StochasticInference(PIDInterface):
def __init__(self, params_to_estimate, M, prior):
self.LL_stoch = None
self.dataStoch = None
super().__init__(params_to_estimate, M, prior)
return
def setup_likelihood_function(self, data, timepoints, measurements, initial_conditions, norm_order = 2, N_simulations = 3, debug = False, **kwargs):
N = np.shape(data)[0]
if debug:
print('Stochastic inference attributes:')
print('The timepoints shape is {0}'.format(np.shape(timepoints)))
print('The data shape is {0}'.format(np.shape(data)))
            print('The measurements are {0}'.format(measurements))
print('The N is {0}'.format(N))
print('Using the initial conditions: {0}'.format(initial_conditions))
self.dataStoch = StochasticTrajectories(np.array(timepoints), data, measurements, N)
#If there are multiple initial conditions in a data-set, should correspond to multiple initial conditions for inference.
#Note len(initial_conditions) must be equal to the number of trajectories N
self.LL_stoch = STLL(model = self.M, init_state = initial_conditions,
data = self.dataStoch, N_simulations = N_simulations, norm_order = norm_order)
def get_likelihood_function(self, params):
# Set params here and return the likelihood object.
if self.LL_stoch is None:
raise RuntimeError("Must call StochasticInference.setup_likelihood_function before using StochasticInference.get_likelihood_function.")
#Set params
params_dict = {}
for key, p in zip(self.params_to_estimate, params):
params_dict[key] = p
self.LL_stoch.set_init_params(params_dict)
#Prior
lp = self.check_prior(params_dict)
if not np.isfinite(lp):
return -np.inf
LL_stoch_cost = self.LL_stoch.py_log_likelihood()
ln_prob = lp + LL_stoch_cost
return ln_prob
# Add a new class similar to this to create new interfaces.
class DeterministicInference(PIDInterface):
def __init__(self, params_to_estimate, M, prior):
self.LL_det = None
self.dataDet = None
super().__init__(params_to_estimate, M, prior)
return
def setup_likelihood_function(self, data, timepoints, measurements, initial_conditions, norm_order = 2, debug = False, **kwargs):
N = np.shape(data)[0]
        # Create a data object
# In this case the timepoints should be a list of timepoints vectors for each iteration
self.dataDet = BulkData(np.array(timepoints), data, measurements, N)
#If there are multiple initial conditions in a data-set, should correspond to multiple initial conditions for inference.
#Note len(initial_conditions) must be equal to the number of trajectories N
if debug:
print('The deterministic inference attributes:')
print('The timepoints shape is {0}'.format(np.shape(timepoints)))
print('The data shape is {0}'.format(np.shape(data)))
            print('The measurements are {0}'.format(measurements))
print('The N is {0}'.format(N))
print('Using the initial conditions: {0}'.format(initial_conditions))
#Create Likelihood object
self.LL_det = DLL(model = self.M, init_state = initial_conditions, data = self.dataDet, norm_order = norm_order)
def get_likelihood_function(self, params):
if self.LL_det is None:
raise RuntimeError("Must call DeterministicInference.setup_likelihood_function before using DeterministicInference.get_likelihood_function.")
#this part is the only part that is called repeatedly
params_dict = {}
for key, p in zip(self.params_to_estimate, params):
params_dict[key] = p
self.LL_det.set_init_params(params_dict)
# Check prior
        lp = self.check_prior(params_dict)
if not np.isfinite(lp):
return -np.inf
#apply cost function
LL_det_cost = self.LL_det.py_log_likelihood()
ln_prob = lp + LL_det_cost
return ln_prob
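# Example (sketch): skeleton for a new inference interface, following the
# pattern of the two classes above. The likelihood object here is a
# placeholder -- a real implementation would wrap one of bioscrape's
# likelihood classes, exactly as StochasticInference and
# DeterministicInference do.
class CustomInference(PIDInterface):
    def __init__(self, params_to_estimate, M, prior):
        self.LL_custom = None
        super().__init__(params_to_estimate, M, prior)

    def setup_likelihood_function(self, data, timepoints, measurements,
                                  initial_conditions, **kwargs):
        # Build and store a likelihood object for the data here.
        self.LL_custom = None

    def get_likelihood_function(self, params):
        if self.LL_custom is None:
            raise RuntimeError("Must call setup_likelihood_function first.")
        params_dict = dict(zip(self.params_to_estimate, params))
        self.LL_custom.set_init_params(params_dict)
        lp = self.check_prior(params_dict)
        if not np.isfinite(lp):
            return -np.inf
        return lp + self.LL_custom.py_log_likelihood()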
| ananswam/bioscrape | bioscrape/pid_interfaces.py | Python | mit | 13,998 |
from flask_bcrypt import generate_password_hash
# Change the number of rounds (second argument) until it takes between
# 0.25 and 0.5 seconds to run.
generate_password_hash('password1', 8)
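# A quick calibration sketch using only the standard library: time a few
# candidate round counts and keep the smallest one that lands in the
# 0.25-0.5 second window on the deployment hardware.
import timeit
for rounds in (8, 10, 12):
    elapsed = timeit.timeit(
        lambda: generate_password_hash('password1', rounds), number=1)
    print('rounds=%d: %.3f s' % (rounds, elapsed))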
| VaSe7u/Supernutrient_0_5 | hash_check.py | Python | mit | 195 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for main window related functionality
"""
import PyQt4.QtGui
from herculeum.ui.controllers import EndScreenController, StartGameController
from herculeum.ui.gui.endscreen import EndScreen
from herculeum.ui.gui.eventdisplay import EventMessageDockWidget
from herculeum.ui.gui.map import PlayMapWindow
from herculeum.ui.gui.menu import MenuDialog
from herculeum.ui.gui.startgame import StartGameWidget
from PyQt4.QtCore import QFile, Qt
from PyQt4.QtGui import (QAction, QApplication, QCursor, QDialog, QIcon,
QMainWindow, QPixmap, QSplashScreen)
class QtUserInterface():
"""
Class for Qt User Interface
.. versionadded:: 0.9
"""
def __init__(self, application):
"""
Default constructor
"""
super().__init__()
self.application = application
self.splash_screen = None
self.qt_app = QApplication([])
# self.qt_app.setOverrideCursor(QCursor(Qt.BlankCursor))
def show_splash_screen(self):
"""
Show splash screen
"""
file = QFile(':herculeum.qss')
file.open(QFile.ReadOnly)
styleSheet = str(file.readAll().data(), 'ascii')
self.qt_app.setStyleSheet(styleSheet)
pixmap = QPixmap(':splash.png')
self.splash_screen = QSplashScreen(pixmap)
self.splash_screen.show()
def show_main_window(self):
"""
Show main window
"""
main_window = MainWindow(self.application,
self.application.surface_manager,
self.qt_app,
None,
Qt.FramelessWindowHint,
StartGameController(self.application.level_generator_factory,
self.application.creature_generator,
self.application.item_generator,
self.application.config.start_level))
self.splash_screen.finish(main_window)
main_window.show_new_game()
self.qt_app.exec_()
class MainWindow(QMainWindow):
"""
Class for displaying main window
.. versionadded:: 0.5
"""
def __init__(self, application, surface_manager, qt_app, parent, flags,
controller):
"""
Default constructor
"""
super().__init__(parent, flags)
self.application = application
self.surface_manager = surface_manager
self.qt_app = qt_app
self.controller = controller
self.__set_layout()
def __set_layout(self):
exit_action = QAction(QIcon(':exit-game.png'),
'&Quit',
self)
exit_action.setShortcut('Ctrl+Q')
exit_action.setStatusTip('Quit game')
exit_action.triggered.connect(PyQt4.QtGui.qApp.quit)
inventory_action = QAction(QIcon(':inventory.png'),
'Inventory',
self)
inventory_action.setShortcut('Ctrl+I')
inventory_action.setStatusTip('Show inventory')
inventory_action.triggered.connect(self.__show_menu)
character_action = QAction(QIcon(':character.png'),
'Character',
self)
character_action.setShortcut('Ctrl+C')
character_action.setStatusTip('Show character')
self.map_window = PlayMapWindow(parent=None,
model=self.application.world,
surface_manager=self.surface_manager,
action_factory=self.application.action_factory,
rng=self.application.rng,
rules_engine=self.application.rules_engine,
configuration=self.application.config)
self.setCentralWidget(self.map_window)
self.map_window.MenuRequested.connect(self.__show_menu)
self.map_window.EndScreenRequested.connect(self.__show_end_screen)
self.setGeometry(50, 50, 800, 600)
self.setWindowTitle('Herculeum')
self.setWindowIcon(QIcon(':rune-stone.png'))
self.showMaximized()
def show_new_game(self):
"""
Show new game dialog
"""
app = self.application
start_dialog = StartGameWidget(generator=app.player_generator,
config=self.application.config.controls,
parent=self,
application=self.application,
surface_manager=self.surface_manager,
flags=Qt.Dialog | Qt.CustomizeWindowHint)
result = start_dialog.exec_()
if result == QDialog.Accepted:
player = start_dialog.player_character
intro_text = self.controller.setup_world(self.application.world,
player)
player.register_for_updates(self.map_window.hit_points_widget)
self.map_window.hit_points_widget.show_hit_points(player)
self.map_window.hit_points_widget.show_spirit_points(player)
self.map_window.message_widget.text_edit.setText(intro_text)
self.__show_map_window()
def __show_map_window(self):
"""
Show map window
"""
self.map_window.construct_scene()
def __show_message_window(self, character):
"""
Show message display
:param character: character which events to display
:type character: Character
"""
messages_display = EventMessageDockWidget(self, character)
self.addDockWidget(Qt.BottomDockWidgetArea,
messages_display)
def __show_menu(self):
"""
Show menu
"""
menu_dialog = MenuDialog(self.surface_manager,
self.application.world.player,
self.application.action_factory,
self.application.config.controls,
self,
Qt.Dialog | Qt.CustomizeWindowHint)
menu_dialog.exec_()
def __show_end_screen(self):
"""
Show end screen
.. versionadded:: 0.8
"""
end_screen = EndScreen(self.application.world,
self.application.config.controls,
self,
Qt.Dialog | Qt.CustomizeWindowHint,
controller=EndScreenController())
end_screen.exec_()
self.qt_app.quit()
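# Minimal launch sketch (assumes an already-configured herculeum
# `application` object, as built by the game's real entry point):
#
# ui = QtUserInterface(application)
# ui.show_splash_screen()
# ui.show_main_window()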
| tuturto/pyherc | src/herculeum/ui/gui/mainwindow.py | Python | mit | 8,099 |
from utils.face import Face
import pygame
from utils.message import Message
from utils.alarm import Alarm
class Button(pygame.sprite.Sprite):
def __init__(self, rect, color=(0,0,255), action=None):
pygame.sprite.Sprite.__init__(self)
self.color = color
self.action = action
self.rect = pygame.Rect(rect)
self.baseImage = pygame.Surface((self.rect.width, self.rect.height))
self.image = self.baseImage
def update(self):
rect = self.baseImage.get_rect()
        pygame.draw.circle(self.baseImage, self.color, rect.center, rect.width//2, 1)
def touchDown(self):
rect = self.baseImage.get_rect()
        pygame.draw.circle(self.baseImage, self.color, rect.center, rect.width//2, 0)
def touchUp(self):
rect = self.baseImage.get_rect()
self.image.fill(pygame.Color("black"))
        pygame.draw.circle(self.baseImage, self.color, rect.center, rect.width//2, 1)
if self.action is not None:
self.action()
def setAction(self, action):
self.action = action
class Line(Face):
def __init__(self, rect, color=(0,0,255), text=""):
pygame.sprite.Sprite.__init__(self)
self._alarmList = {}
self.color = color
self.rect = pygame.Rect(rect)
self.text = text
self.baseImage = pygame.Surface((self.rect.width, self.rect.height))
self.image = self.baseImage
self.faceSprite = pygame.sprite.GroupSingle(Message((self.text,), vector=(0,0), fontsize=45, align="left", padding=0, fgcolor=(0,0,255)))
surfaceRect = self.image.get_rect()
self.faceSprite.sprite.rect.midleft = surfaceRect.midleft
def update(self):
self.faceSprite.draw(self.baseImage)
class AlarmSetting(Face):
def __init__(self, rect, alarm, color=(0,0,255)):
pygame.sprite.Sprite.__init__(self)
self._alarmList = {}
if isinstance(alarm, Alarm):
self._alarmObject = alarm
else:
raise Exception("Not an Alarm-class object")
self.color = color
self.rect = pygame.Rect(rect)
self.requestingFace = False
self.baseImage = pygame.Surface((self.rect.width, self.rect.height))
self.image = self.baseImage
self._lines = []
for i in range(4):
line = pygame.sprite.GroupSingle(Line(pygame.Rect((0, 0),(rect.height/5*4, rect.height/5)), text="Hello"))
line.sprite.rect.topright = (rect.width, rect.height/4*i)
self._lines.append(line)
def addAlarm(self):
line = pygame.sprite.GroupSingle(Button(pygame.Rect((0, 0),(self.rect.height/5, self.rect.height/5))))
line.sprite.rect.topright = (self.rect.width, self.rect.height/4)
line.sprite.setAction(self.addAlarm)
self._lines.append(line)
def update(self):
for line in self._lines:
line.update()
# line.sprite.rect.midbottom = self.image.get_rect()
line.draw(self.baseImage)
def handleEvent(self, event):
pos = pygame.mouse.get_pos()
if event.type == pygame.MOUSEBUTTONDOWN:
for butt in self._lines:
if butt.sprite.rect.collidepoint(pos):
butt.sprite.touchDown()
if event.type == pygame.MOUSEBUTTONUP:
for butt in self._lines:
if butt.sprite.rect.collidepoint(pos):
butt.sprite.touchUp()
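# Example (sketch): a minimal event loop wiring a Button to an action.
# This is illustrative only -- the real application drives these widgets
# through its Face framework.
if __name__ == '__main__':
    def _demo_action():
        print('button pressed')

    pygame.init()
    screen = pygame.display.set_mode((200, 200))
    button = Button((70, 70, 60, 60), action=_demo_action)
    group = pygame.sprite.Group(button)
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN and button.rect.collidepoint(event.pos):
                button.touchDown()
            elif event.type == pygame.MOUSEBUTTONUP and button.rect.collidepoint(event.pos):
                button.touchUp()
        group.update()
        screen.blit(button.image, button.rect)
        pygame.display.flip()
    pygame.quit()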
| khan-git/pialarmclock | faces/alarmsetting.py | Python | mit | 3,537 |
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.forms import inlineformset_factory
from djangosige.apps.financeiro.models import PlanoContasGrupo, PlanoContasSubgrupo
class PlanoContasGrupoForm(forms.ModelForm):
class Meta:
model = PlanoContasGrupo
fields = ('tipo_grupo', 'descricao',)
widgets = {
'descricao': forms.TextInput(attrs={'class': 'form-control'}),
'tipo_grupo': forms.Select(attrs={'class': 'form-control'}),
}
labels = {
'descricao': _('Descrição'),
'tipo_grupo': _('Tipo de lançamento'),
}
class PlanoContasSubgrupoForm(forms.ModelForm):
class Meta:
model = PlanoContasSubgrupo
fields = ('descricao',)
widgets = {
'descricao': forms.TextInput(attrs={'class': 'form-control'}),
}
labels = {
'descricao': _('Descrição'),
}
PlanoContasSubgrupoFormSet = inlineformset_factory(
PlanoContasGrupo, PlanoContasSubgrupo, form=PlanoContasSubgrupoForm, fk_name='grupo', extra=1, can_delete=True)
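# Example (sketch): one way to wire the grupo form and subgrupo formset
# together in a view. The view, URL and template names are illustrative,
# not part of this module:
#
# from django.shortcuts import render, redirect
#
# def editar_grupo(request, pk):
#     grupo = PlanoContasGrupo.objects.get(pk=pk)
#     form = PlanoContasGrupoForm(request.POST or None, instance=grupo)
#     formset = PlanoContasSubgrupoFormSet(request.POST or None, instance=grupo)
#     if form.is_valid() and formset.is_valid():
#         form.save()
#         formset.save()
#         return redirect('plano_contas')
#     return render(request, 'financeiro/grupo.html',
#                   {'form': form, 'formset': formset})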
| thiagopena/djangoSIGE | djangosige/apps/financeiro/forms/plano.py | Python | mit | 1,165 |
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from django import forms
class VoterCreationForm(UserCreationForm):
section = forms.CharField()
def save(self, commit=True):
user = super(VoterCreationForm, self).save(commit=False)
user.section = self.cleaned_data['section']
if commit:
user.save()
return user
class Meta:
model = User
fields = ('username', 'password1', 'password2', 'section', 'first_name', 'last_name', 'is_active', 'is_staff', 'is_superuser')
class VoterChangeForm(UserChangeForm):
section = forms.CharField()
def save(self, commit=True):
user = super(VoterChangeForm, self).save(commit=False)
user.section = self.cleaned_data['section']
if commit:
user.save()
return user
class Meta:
model = User
exclude = ('',)
class VoterAdmin(UserAdmin):
form = VoterChangeForm
add_form = VoterCreationForm
list_filter = UserAdmin.list_filter + ('section',)
fieldsets = (
(None, {'fields': ('username', 'password')}),
(('Personal info'), {'fields': ('first_name', 'last_name', 'section')}),
(('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2', 'section', 'first_name', 'last_name', 'is_active', 'is_staff', 'is_superuser')}
),
)
admin.site.unregister(User)
admin.site.register(User, VoterAdmin)
| seanballais/SAElections | SAElections/voting/admin.py | Python | mit | 1,715 |
# -*- coding: utf-8 -*-
# Simple script to test subscribing to a remote GNTP server with the GrowlNotifier class
import logging
logging.basicConfig(level=logging.DEBUG)
from gntp.notifier import GrowlNotifier
import platform
growl = GrowlNotifier(notifications=['Testing'],password='password',hostname='ayu')
growl.subscribe(platform.node(),platform.node(),12345)
| kfdm/gntp | test/subscribe.py | Python | mit | 347 |
# Scripts to plot the data, currently only in the context of Q&A communities.
| Nik0l/UTemPro | Plot.py | Python | mit | 76 |
from django.conf.urls.defaults import *
urlpatterns = patterns('member.views',
url(r'^$', 'login', name='passport_index'),
url(r'^register/$', 'register', name='passport_register'),
url(r'^login/$', 'login', name='passport_login'),
url(r'^logout/$', 'logout', name='passport_logout'),
url(r'^active/$', 'active', name='passport_active'),
url(r'^forget/$', 'forget', name='passport_forget'),
url(r'^profile/$', 'profile', name='passport_profile'),
)
| masiqi/douquan | member/urls.py | Python | mit | 478 |
# -*- coding: utf-8 -*-
"""
The following examples are used to demonstrate how to get/record
analytics
The method signatures are:
Pushbots.get_analytics()
and
Pushbots.record_analytics(platform=None, data=None)
In which you must specify either platform or data.
"""
from pushbots import Pushbots
def example_get_analytics():
"""Get analytics by calling Pushbots.get_analytics()"""
# Define app_id and secret
my_app_id = 'my_app_id'
my_secret = 'my_secret'
# Create a Pushbots instance
pushbots = Pushbots(app_id=my_app_id, secret=my_secret)
code, message = pushbots.get_analytics()
print('Returned code: {0}'.format(code))
print('Returned message: {0}'.format(message))
def example_record_analytics1():
"""Record analytics by passing platform directly to
Pushbots.record_analytics()
"""
# Define app_id and secret
my_app_id = 'my_app_id'
my_secret = 'my_secret'
# Create a Pushbots instance
pushbots = Pushbots(app_id=my_app_id, secret=my_secret)
# Define platform
platform = Pushbots.PLATFORM_IOS
code, message = pushbots.record_analytics(platform=platform)
print('Returned code: {0}'.format(code))
print('Returned message: {0}'.format(message))
def example_record_analytics2():
"""Record analytics by passing data defined by you to
Pushbots.record_analytics()
"""
# Define app_id and secret
my_app_id = 'my_app_id'
my_secret = 'my_secret'
# Create a Pushbots instance
pushbots = Pushbots(app_id=my_app_id, secret=my_secret)
# Define data
data = {'platform': '0'} # '0' is Equivalent to Pushbots.PLATFORM_IOS
code, message = pushbots.record_analytics(data=data)
print('Returned code: {0}'.format(code))
print('Returned message: {0}'.format(message))
| tchar/pushbots | pushbots/examples/analytics.py | Python | mit | 1,805 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import httpretty
import json
import sure
from pyeqs import QuerySet, Filter
from pyeqs.dsl import Term, Sort, ScriptScore
from tests.helpers import homogeneous
@httpretty.activate
def test_create_queryset_with_host_string():
"""
Create a queryset with a host given as a string
"""
    # When I create a queryset
t = QuerySet("localhost", index="bar")
# And I have records
response = {
"took": 1,
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "bar",
"_type": "baz",
"_id": "1",
"_score": 10,
"_source": {
"foo": "bar"
},
"sort": [
1395687078000
]
}
]
}
}
httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search",
body=json.dumps(response),
content_type="application/json")
# When I run a query
results = t[0:1]
# Then I see the response.
len(results).should.equal(1)
@httpretty.activate
def test_create_queryset_with_host_dict():
"""
Create a queryset with a host given as a dict
"""
    # When I create a queryset
connection_info = {"host": "localhost", "port": 8080}
t = QuerySet(connection_info, index="bar")
# And I have records
good_response = {
"took": 1,
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "bar",
"_type": "baz",
"_id": "1",
"_score": 10,
"_source": {
"foo": "bar"
},
"sort": [
1395687078000
]
}
]
}
}
bad_response = {
"took": 1,
"hits": {
"total": 0,
"max_score": None,
"hits": []
}
}
httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search",
body=json.dumps(bad_response),
content_type="application/json")
httpretty.register_uri(httpretty.GET, "http://localhost:8080/bar/_search",
body=json.dumps(good_response),
content_type="application/json")
# When I run a query
results = t[0:1]
# Then I see the response.
len(results).should.equal(1)
results[0]["_source"]["foo"].should.equal("bar")
@httpretty.activate
def test_create_queryset_with_host_list():
"""
Create a queryset with a host given as a list
"""
    # When I create a queryset
connection_info = [{"host": "localhost", "port": 8080}]
t = QuerySet(connection_info, index="bar")
# And I have records
good_response = {
"took": 1,
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "bar",
"_type": "baz",
"_id": "1",
"_score": 10,
"_source": {
"foo": "bar"
},
"sort": [
1395687078000
]
}
]
}
}
bad_response = {
"took": 1,
"hits": {
"total": 0,
"max_score": None,
"hits": []
}
}
httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search",
body=json.dumps(bad_response),
content_type="application/json")
httpretty.register_uri(httpretty.GET, "http://localhost:8080/bar/_search",
body=json.dumps(good_response),
content_type="application/json")
# When I run a query
results = t[0:1]
# Then I see the response.
len(results).should.equal(1)
results[0]["_source"]["foo"].should.equal("bar")
| Yipit/pyeqs | tests/unit/test_connection.py | Python | mit | 4,100 |
from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON
from ...errors import Errors
from ...tokens import Doc, Span
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""
Detect base noun phrases from a dependency parse. Works on both Doc and Span.
"""
# fmt: off
labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
# fmt: on
doc = doclike.doc # Ensure works on both Doc and Span.
if not doc.has_annotation("DEP"):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings[label] for label in labels]
conj = doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
prev_end = -1
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.left_edge.i <= prev_end:
continue
if word.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
elif word.dep == conj:
head = word.head
while head.dep == conj and head.head.i < head.i:
head = head.head
# If the head is an NP, and we're coordinated to it, we're an NP
if head.dep in np_deps:
prev_end = word.right_edge.i
yield word.left_edge.i, word.right_edge.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
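# Usage sketch (assumes a trained Indonesian pipeline with a dependency
# parser; the model name below is hypothetical). Called directly, the
# iterator yields (start, end, label) tuples; spaCy's `doc.noun_chunks`
# wraps the same tuples into Span objects:
#
# import spacy
# nlp = spacy.load("id_core_news_sm")  # hypothetical model name
# doc = nlp("Teks bahasa Indonesia di sini.")
# for start, end, label in noun_chunks(doc):
#     print(doc[start:end].text)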
| spacy-io/spaCy | spacy/lang/id/syntax_iterators.py | Python | mit | 1,515 |
from random import randint, seed, choice, random
from numpy import zeros, uint8, cumsum, floor, ceil
from math import sqrt, log
from collections import namedtuple
from PIL import Image
from logging import info, getLogger
class Tree:
def __init__(self, leaf):
self.leaf = leaf
self.lchild = None
self.rchild = None
def get_leafs(self):
if self.lchild == None and self.rchild == None:
return [self.leaf]
else:
return self.lchild.get_leafs()+self.rchild.get_leafs()
def get_level(self, level, queue):
        if queue is None:
            queue = []
        if level == 1:
            queue.append(self)
else:
if self.lchild != None:
self.lchild.get_level(level-1, queue)
if self.rchild != None:
self.rchild.get_level(level-1, queue)
return queue
def paint(self, c):
self.leaf.paint(c)
if self.lchild != None:
self.lchild.paint(c)
if self.rchild != None:
self.rchild.paint(c)
class Container():
def __init__(self, x, y, w, h):
self.x = x
self.y = y
self.w = w
self.h = h
self.center = (self.x+int(self.w/2),self.y+int(self.h/2))
self.distance_from_center = sqrt((self.center[0]-MAP_WIDTH/2)**2 + (self.center[1]-MAP_HEIGHT/2)**2)
def paint(self, c):
c.stroke_rectangle(self.x, self.y, self.w, self.h)
def draw_path(self,c,container):
c.path(self.center[0],self.center[1],container.center[0],container.center[1])
class Canvas:
brushes = {"empty":0, "hallway":1, "room":2}
def __init__(self, w, h, color = "empty"):
self.board = zeros((h,w), dtype=uint8)
self.w = w
self.h = h
self.set_brush(color)
def set_brush(self, code):
self.color = self.brushes[code]
def stroke_rectangle(self, x, y, w, h):
self.line(x,y,w,True)
self.line(x,y+h-1,w,True)
self.line(x,y,h,False)
self.line(x+w-1,y,h,False)
def filled_rectangle(self, x, y, w, h):
self.board[y:y+h,x:x+w] = self.color
def line(self, x, y, length, horizontal):
if horizontal:
self.board[y,x:x+length] = self.color
else:
self.board[y:y+length,x] = self.color
def path(self,x1,y1,x2,y2):
self.board[y1:y2+1,x1:x2+1] = self.color
def circle(self,x,y,r):
for x_offset in range(-r,r+1):
for y_offset in range(-r,r+1):
if sqrt(x_offset**2+y_offset**2)<r:
                    self.board[y+y_offset, x+x_offset] = self.color  # board is indexed [row, col]
def draw(self):
im = Image.fromarray(self.board)
im.save(MAP_NAME)
def __str__(self):
return str(self.board)
class Room:
environments = ["serene", "calm", "wild", "dangerous", "evil"]
biomes = ["rock", "rugged", "sand", "mossy", "muddy", "flooded", "gelid", "gloomy", "magma"]
biomes_CDF = cumsum([0.22,0.14,0.12,0.10,0.10,0.07,0.06,0.06,0.04,0.03,0.03,0.03])
def __init__(self, container):
self.x = container.x+randint(1, floor(container.w/3))
self.y = container.y+randint(1, floor(container.h/3))
self.w = container.w-(self.x-container.x)
self.h = container.h-(self.y-container.y)
self.w -= randint(0,floor(self.w/3))
        self.h -= randint(0, floor(self.h/3))
self.environment = int(min(4,10*(container.distance_from_center/MAP_WIDTH)+random()*2-1))
roll = random()*0.9+(2*container.distance_from_center/MAP_WIDTH)*0.1
self.biome = next(n for n,b in enumerate(self.biomes_CDF) if roll<b)
def paint(self,c):
c.filled_rectangle(self.x, self.y,self.w, self.h)
def random_split(container):
if container.w<MIN_ROOM_SIDE and container.h<MIN_ROOM_SIDE:
return None
def _split_vertical(container):
r1 = None
r2 = None
min_w = int(W_RATIO*container.h)+1
if container.w < 2*min_w:
return None
r1 = Container(container.x,container.y,randint(min_w, container.w-min_w),container.h)
r2 = Container(container.x+r1.w,container.y,container.w-r1.w,container.h)
return [r1, r2]
def _split_horizontal(container):
r1 = None
r2 = None
min_h = int(H_RATIO*container.w)+1
if container.h < 2*min_h:
return None
r1 = Container(container.x,container.y,container.w,randint(min_h, container.h-min_h))
r2 = Container(container.x,container.y+r1.h,container.w,container.h-r1.h)
return [r1, r2]
if randint(0,1) == 0:
res = _split_vertical(container)
if res == None:
return _split_horizontal(container)
return res
else:
res = _split_horizontal(container)
if res == None:
return _split_vertical(container)
return res
def split_container(container, iter):
root = Tree(container)
    if iter > 0:
sr = random_split(container)
if sr!=None:
root.lchild = split_container(sr[0], iter-1)
root.rchild = split_container(sr[1], iter-1)
return root
def draw_paths(c, tree):
if tree.lchild == None or tree.rchild == None:
return
tree.lchild.leaf.draw_path(c, tree.rchild.leaf)
draw_paths(c, tree.lchild)
draw_paths(c, tree.rchild)
MAP_WIDTH = 0
MAP_HEIGHT = 0
N_ITERATIONS = 0
H_RATIO = 0
W_RATIO = 0
MIN_ROOM_SIDE = 0
CENTER_HUB_HOLE = 0
CENTER_HUB_RADIO = 0
MAP_NAME = 0
def init(num_players):
global MAP_WIDTH,MAP_HEIGHT,N_ITERATIONS,H_RATIO,W_RATIO,MIN_ROOM_SIDE,CENTER_HUB_HOLE,CENTER_HUB_RADIO,MAP_NAME
MAP_WIDTH=int(500*sqrt(num_players))
MAP_HEIGHT=MAP_WIDTH
N_ITERATIONS=log(MAP_WIDTH*100,2)
H_RATIO=0.49
W_RATIO=H_RATIO
MIN_ROOM_SIDE = 32
CENTER_HUB_HOLE = 32
CENTER_HUB_RADIO = CENTER_HUB_HOLE-MIN_ROOM_SIDE/2
MAP_NAME="result%s.png"%MAP_WIDTH
def main(num_players, seed_number):
logger = getLogger('BSPTree')
logger.info("Initialising")
init(num_players)
seed(seed_number)
canvas = Canvas(MAP_WIDTH, MAP_HEIGHT)
canvas.set_brush("empty")
canvas.filled_rectangle(0,0,MAP_WIDTH,MAP_HEIGHT)
logger.info("Generating container tree")
# -1 on the main container to remove borders to avoid opened border rooms
main_container = Container(0, 0, MAP_WIDTH-1, MAP_HEIGHT-1)
container_tree = split_container(main_container, N_ITERATIONS)
logger.info("Generating hallways")
canvas.set_brush("hallway")
draw_paths(canvas, container_tree)
logger.info("Generating rooms")
canvas.set_brush("room")
leafs = container_tree.get_leafs()
rooms = []
for i in range(0, len(leafs)):
if CENTER_HUB_HOLE < leafs[i].distance_from_center < MAP_WIDTH/2:
rooms.append(Room(leafs[i]))
rooms[-1].paint(canvas)
logger.info("Generating hub")
canvas.circle(int(MAP_WIDTH/2),int(MAP_HEIGHT/2),int(CENTER_HUB_RADIO))
#canvas.draw()
    return (rooms, canvas.board)
| juancroldan/derinkuyu | generation/BSPTree.py | Python | mit | 6,186 |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def _uniform(*shape):
return numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
@testing.parameterize(*testing.product({
'in_shapes': [((2,), (4,)), ((2, 1), (4, 2))],
'out_size': [3],
'batch_size': [2]
}))
class TestBilinearFunction(unittest.TestCase):
def setUp(self):
e1_shape = (self.batch_size,) + self.in_shapes[0]
e2_shape = (self.batch_size,) + self.in_shapes[1]
e1_size = numpy.prod(self.in_shapes[0])
e2_size = numpy.prod(self.in_shapes[1])
self.e1 = _uniform(*e1_shape)
self.e2 = _uniform(*e2_shape)
self.W = _uniform(e1_size, e2_size, self.out_size)
self.V1 = _uniform(e1_size, self.out_size)
self.V2 = _uniform(e2_size, self.out_size)
self.b = _uniform(self.out_size)
self.gy = _uniform(self.batch_size, self.out_size)
self.gge1 = _uniform(*self.e1.shape)
self.gge2 = _uniform(*self.e2.shape)
self.ggW = _uniform(*self.W.shape)
self.ggV1 = _uniform(*self.V1.shape)
self.ggV2 = _uniform(*self.V2.shape)
self.ggb = _uniform(*self.b.shape)
self.check_backward_options = {
'atol': 1e-5, 'rtol': 1e-4, 'dtype': numpy.float64}
self.check_double_backward_options = {
'atol': 1e-4, 'rtol': 1e-3, 'dtype': numpy.float64}
def check_forward(self, e1_data, e2_data, W_data, V1_data, V2_data,
b_data):
e1 = chainer.Variable(e1_data)
e2 = chainer.Variable(e2_data)
W = chainer.Variable(W_data)
e1_data = e1_data.reshape(e1_data.shape[0], -1)
e2_data = e2_data.reshape(e2_data.shape[0], -1)
xp = cuda.get_array_module(e1)
y_expect = xp.einsum('ij,ik,jkl->il', e1_data, e2_data, W_data)
flags = V1_data is None, V2_data is None, b_data is None
if any(flags):
if not all(flags):
raise ValueError(
'Test either all or none of the optional parameters.')
y = functions.bilinear(e1, e2, W)
else:
V1 = chainer.Variable(V1_data)
V2 = chainer.Variable(V2_data)
b = chainer.Variable(b_data)
y = functions.bilinear(e1, e2, W, V1, V2, b)
y_expect = xp.einsum('ij,ik,jkl->il', e1_data, e2_data, W_data)
y_expect += e1_data.dot(V1_data)
y_expect += e2_data.dot(V2_data)
y_expect += b_data
testing.assert_allclose(y_expect, cuda.to_cpu(y.data))
assert y.data.dtype == e1_data.dtype
def test_forward_cpu(self):
self.check_forward(self.e1, self.e2, self.W, self.V1, self.V2, self.b)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W),
cuda.to_gpu(self.V1), cuda.to_gpu(self.V2), cuda.to_gpu(self.b))
def test_partial_backward_cpu(self):
gradient_check.check_backward(
functions.bilinear, (self.e1, self.e2, self.W), self.gy,
**self.check_backward_options)
@attr.gpu
def test_partial_backward_gpu(self):
gradient_check.check_backward(
functions.bilinear,
(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W)),
cuda.to_gpu(self.gy), **self.check_backward_options)
def test_full_backward_cpu(self):
gradient_check.check_backward(
functions.bilinear,
(self.e1, self.e2, self.W, self.V1, self.V2, self.b), self.gy,
**self.check_backward_options)
@attr.gpu
def test_full_backward_gpu(self):
gradient_check.check_backward(
functions.bilinear,
(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W),
cuda.to_gpu(self.V1), cuda.to_gpu(self.V2), cuda.to_gpu(self.b)),
cuda.to_gpu(self.gy), **self.check_backward_options)
def test_partial_double_backward_cpu(self):
gradient_check.check_double_backward(
functions.bilinear, (self.e1, self.e2, self.W), self.gy,
(self.gge1, self.gge2, self.ggW), **self.check_backward_options)
@attr.gpu
def test_partial_double_backward_gpu(self):
gradient_check.check_double_backward(
functions.bilinear,
(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W)),
cuda.to_gpu(self.gy),
(cuda.to_gpu(self.gge1), cuda.to_gpu(self.gge2),
cuda.to_gpu(self.ggW)), **self.check_backward_options)
def test_full_double_backward_cpu(self):
def f(*inputs):
y = functions.bilinear(*inputs)
return y * y
gradient_check.check_double_backward(
f, (self.e1, self.e2, self.W, self.V1, self.V2, self.b),
self.gy,
(self.gge1, self.gge2, self.ggW, self.ggV1, self.ggV2, self.ggb),
**self.check_double_backward_options)
@attr.gpu
def test_full_double_backward_gpu(self):
def f(*inputs):
y = functions.bilinear(*inputs)
return y * y
gradient_check.check_double_backward(
f,
(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2), cuda.to_gpu(self.W),
cuda.to_gpu(self.V1), cuda.to_gpu(self.V2), cuda.to_gpu(self.b)),
cuda.to_gpu(self.gy),
(cuda.to_gpu(self.gge1), cuda.to_gpu(self.gge2),
             cuda.to_gpu(self.ggW), cuda.to_gpu(self.ggV1), cuda.to_gpu(self.ggV2),
cuda.to_gpu(self.ggb)), **self.check_double_backward_options)
@attr.slow
class TestBilinearFunctionLarge(unittest.TestCase):
def setUp(self):
self.e1 = _uniform(256, 256)
self.e2 = _uniform(256, 256)
self.w = _uniform(256, 256, 256)
self.v1 = _uniform(256, 256)
self.v2 = _uniform(256, 256)
self.b = _uniform(256)
def test_cpu(self):
chainer.functions.bilinear(
self.e1, self.e2, self.w, self.v1, self.v2, self.b)
@attr.gpu
def test_gpu(self):
chainer.functions.bilinear(*map(cuda.to_gpu, (
self.e1, self.e2, self.w, self.v1, self.v2, self.b)))
class TestBilinearFunctionInvalidArgument(unittest.TestCase):
def setUp(self):
e1 = _uniform(3, 2)
e2 = _uniform(3, 4)
W = _uniform(2, 4, 5)
V1 = _uniform(2, 5)
self.e1 = chainer.Variable(e1)
self.e2 = chainer.Variable(e2)
self.W = chainer.Variable(W)
self.V1 = chainer.Variable(V1)
def test_invalid_full_partial_ambiguous(self):
with self.assertRaises(ValueError):
functions.bilinear(self.e1, self.e2, self.W, self.V1)
testing.run_module(__name__, __file__)
| anaruse/chainer | tests/chainer_tests/functions_tests/connection_tests/test_bilinear.py | Python | mit | 6,944 |
from modelmapper.declarations import Mapper, Field
from modelmapper.qt.fields import QLineEditAccessor
class String(QLineEditAccessor):
def get_value(self):
return str(self.widget.text())
def set_value(self, value):
self.widget.setText(str(value))
class Integer(QLineEditAccessor):
def get_value(self):
return int(self.widget.text())
def set_value(self, value):
        self.widget.setText(str(int(value)))
def get_child_x_mapper(x):
return {
'{}_link'.format(x): (x, 'val_{}'.format(x))
}
def get_d_mapper():
return {
'expediente_link': Mapper('c[0]', 'val_c[0]', get_child_x_mapper('a')),
'masa_bruta_link': Mapper('c[1]', 'val_c[1]', get_child_x_mapper('b')),
'nombre_link': Field('cc', 'val_cc'),
}
def get_model_mapper():
return {
'expediente_link': Field('expediente', String('expediente')),
'masa_bruta_link': Field('masa_bruta', Integer('masa_bruta')),
'nombre_link': Field('nombre', String('nombre'))
}
| franramirez688/model-mapper | tests/factory/qt/mapper_data.py | Python | mit | 1,040 |
__author__ = 'bptripp'
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
"""
Initialization of CNNs via clustering of inputs and convex optimization
of outputs.
"""
def sigmoid(x, centre, gain):
y = 1 / (1 + np.exp(-gain*(x-centre)))
return y
def gaussian(x, mu, sigma):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sigma, 2.)))
def get_sigmoid_params(false_samples, true_samples, do_plot=False):
"""
Find gain and bias for sigmoid function that approximates probability
of class memberships. Probability based on Bayes' rule & gaussian
model of samples from each class.
"""
false_mu = np.mean(false_samples)
false_sigma = np.std(false_samples)
true_mu = np.mean(true_samples)
true_sigma = np.std(true_samples)
lowest = np.minimum(np.min(false_samples), np.min(true_samples))
highest = np.maximum(np.max(false_samples), np.max(true_samples))
a = np.arange(lowest, highest, (highest-lowest)/25)
p_x_false = gaussian(a, false_mu, false_sigma)
p_x_true = gaussian(a, true_mu, true_sigma)
p_x = p_x_true + p_x_false
p_true = p_x_true / p_x
popt, _ = curve_fit(sigmoid, a, p_true)
centre, gain = popt[0], popt[1]
if do_plot:
plt.hist(false_samples, a)
plt.hist(true_samples, a)
plt.plot(a, 100*sigmoid(a, centre, gain))
plt.plot(a, 100*p_true)
plt.title('centre: ' + str(centre) + ' gain: ' + str(gain))
plt.show()
return centre, gain
def check_sigmoid():
n = 1000
false_samples = 1 + .3*np.random.randn(n)
true_samples = -1 + 1*np.random.randn(n)
centre, gain = get_sigmoid_params(false_samples, true_samples, do_plot=True)
def get_convolutional_prototypes(samples, shape, patches_per_sample=5):
assert len(samples.shape) == 4
assert len(shape) == 4
wiggle = (samples.shape[2]-shape[2], samples.shape[3]-shape[3])
patches = []
for sample in samples:
for i in range(patches_per_sample):
corner = (np.random.randint(0, wiggle[0]), np.random.randint(0, wiggle[1]))
patches.append(sample[:,corner[0]:corner[0]+shape[2],corner[1]:corner[1]+shape[3]])
patches = np.array(patches)
flat = np.reshape(patches, (patches.shape[0], -1))
km = KMeans(shape[0])
km.fit(flat)
kernels = km.cluster_centers_
# normalize product of centre and corresponding kernel
for i in range(kernels.shape[0]):
kernels[i,:] = kernels[i,:] / np.linalg.norm(kernels[i,:])
return np.reshape(kernels, shape)
def get_dense_prototypes(samples, n):
km = KMeans(n)
km.fit(samples)
return km.cluster_centers_
def check_get_prototypes():
samples = np.random.rand(1000, 2, 28, 28)
prototypes = get_convolutional_prototypes(samples, (20,2,5,5))
print(prototypes.shape)
samples = np.random.rand(900, 2592)
prototypes = get_dense_prototypes(samples, 64)
print(prototypes.shape)
def get_discriminant(samples, labels):
lda = LinearDiscriminantAnalysis(solver='eigen', shrinkage='auto')
lda.fit(samples, labels)
return lda.coef_[0]
def check_discriminant():
n = 1000
labels = np.random.rand(n) < 0.5
samples = np.zeros((n,2))
for i in range(len(labels)):
if labels[i] > 0.5:
samples[i,:] = np.array([0,1]) + 1*np.random.randn(1,2)
else:
samples[i,:] = np.array([-2,-1]) + .5*np.random.randn(1,2)
coeff = get_discriminant(samples, labels)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.scatter(samples[labels>.5,0], samples[labels>.5,1], color='g')
plt.scatter(samples[labels<.5,0], samples[labels<.5,1], color='r')
plt.plot([-coeff[0], coeff[0]], [-coeff[1], coeff[1]], color='k')
plt.subplot(1,2,2)
get_sigmoid_params(np.dot(samples[labels<.5], coeff),
np.dot(samples[labels>.5], coeff),
do_plot=True)
plt.show()
def init_model(model, X_train, Y_train):
if not (isinstance(model.layers[-1], Activation) \
and model.layers[-1].activation.__name__ == 'sigmoid'\
and isinstance(model.layers[-2], Dense)):
raise Exception('This does not look like an LDA-compatible network, which is all we support')
for i in range(len(model.layers)-2):
if isinstance(model.layers[i], Convolution2D):
inputs = get_inputs(model, X_train, i)
w, b = model.layers[i].get_weights()
w = get_convolutional_prototypes(inputs, w.shape)
b = .1 * np.ones_like(b)
model.layers[i].set_weights([w,b])
if isinstance(model.layers[i], Dense):
inputs = get_inputs(model, X_train, i)
w, b = model.layers[i].get_weights()
w = get_dense_prototypes(inputs, w.shape[1]).T
b = .1 * np.ones_like(b)
model.layers[i].set_weights([w,b])
inputs = get_inputs(model, X_train, len(model.layers)-3)
coeff = get_discriminant(inputs, Y_train)
centre, gain = get_sigmoid_params(np.dot(inputs[Y_train<.5], coeff),
np.dot(inputs[Y_train>.5], coeff))
w = coeff*gain
w = w[:,np.newaxis]
b = np.array([-centre])
model.layers[-2].set_weights([w,b])
sigmoid_inputs = get_inputs(model, X_train, len(model.layers)-1)
plt.figure()
plt.subplot(2,1,1)
plt.hist(sigmoid_inputs[Y_train<.5])
plt.subplot(2,1,2)
plt.hist(sigmoid_inputs[Y_train>.5])
plt.show()
def get_inputs(model, X_train, layer):
if layer == 0:
return X_train
else:
partial_model = Sequential(layers=model.layers[:layer])
partial_model.compile('sgd', 'mse')
return partial_model.predict(X_train)
if __name__ == '__main__':
# check_sigmoid()
# check_get_prototypes()
# check_discriminant()
import cPickle
f = file('../data/bowl-test.pkl', 'rb')
# f = file('../data/depths/24_bowl-29-Feb-2016-15-01-53.pkl', 'rb')
d, bd, l = cPickle.load(f)
f.close()
d = d - np.mean(d.flatten())
d = d / np.std(d.flatten())
# n = 900
n = 90
X_train = np.zeros((n,1,80,80))
X_train[:,0,:,:] = d[:n,:,:]
Y_train = l[:n]
model = Sequential()
model.add(Convolution2D(64,9,9,input_shape=(1,80,80)))
model.add(Activation('relu'))
model.add(MaxPooling2D())
# model.add(Convolution2D(64,3,3))
# model.add(Activation('relu'))
# model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
init_model(model, X_train, Y_train)
# from visualize import plot_kernels
# plot_kernels(model.layers[0].get_weights()[0])
| bptripp/grasp-convnet | py/cninit.py | Python | mit | 7,083 |
# -*- coding: utf-8 -*-
tokens = [
'LPAREN',
'RPAREN',
'LBRACE',
'RBRACE',
'EQUAL',
'DOUBLE_EQUAL',
'NUMBER',
'COMMA',
'VAR_DEFINITION',
'IF',
'ELSE',
'END',
'ID',
'PRINT'
]
t_LPAREN = r"\("
t_RPAREN = r"\)"
t_LBRACE = r"\{"
t_RBRACE = r"\}"
t_EQUAL = r"\="
t_DOUBLE_EQUAL = r"\=\="
def t_NUMBER(token):
r"[0-9]+"
token.value = int(token.value)
return token
t_COMMA = r","
def t_VAR_DEFINITION(token):
r",\sFirst\sof\s(his|her)\sName"
return token
def t_IF(token):
r"I\spromise"
return token
def t_ELSE(token):
r"Mayhaps"
return token
def t_PRINT(token):
r"Hodor"
return token
def t_END(token):
r"And\snow\shis\swatch\sis\sended"
return token
def t_ID(token):
r"[a-zA-Z][_a-zA-Z0-9]*"
return token
t_ignore = " \t"
def t_NEWLINE(token):
r"\n+"
token.lexer.lineno += len(token.value)
def t_IGNORE_COMMENTS(token):
r"//(.*)\n"
token.lexer.lineno += 1
def t_error(token):
raise Exception("Sintax error: Unknown token on line {0}. \"{1}\"".format(token.lineno, token.value.partition("\n")[0]))
| pablogonzalezalba/a-language-of-ice-and-fire | lexer_rules.py | Python | mit | 1,138 |
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import numpy
from pyqtgraph.parametertree import Parameter, ParameterTree, ParameterItem, registerParameterType
class FeatureSelectionDialog(QtGui.QDialog):
def __init__(self,viewer, parent):
super(FeatureSelectionDialog, self).__init__(parent)
self.resize(800,600)
self.viewer = viewer
self.layout = QtGui.QVBoxLayout()
self.setLayout(self.layout)
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok|QtGui.QDialogButtonBox.Cancel)
self.buttonBox.accepted.connect(self.onPressAccepted)
def makeCheckBox(name, val=True):
return {
'name': name,
'type': 'bool',
'value': val,
#'tip': "This is a checkbox",
}
sigmaOpt = {'name': 'sigma', 'type': 'str', 'value': '[0.0, 1.0, 2.0, 4.0]' }
wardOpts = {'name': 'wardness', 'type': 'str', 'value': '[0.0, 0.1, 0.2]' }
filterChild = [
makeCheckBox("computeFilter"),
sigmaOpt,
{
'name':'UCM',
'children': [
makeCheckBox("ucmFilters"),
wardOpts,
{'name': 'meanSign', 'type': 'float', 'value': '1.0' }
]
}
]
params = [
{
'name' : "RawData",
'type' : 'group',
'children' : [
{
'name': 'Compute Features On Raw Data',
'type': 'bool',
'value': True,
'tip': "This is a checkbox",
},
{
'name' : "0-Order Filter",
'type' : 'group',
'children' : filterChild
},
{
'name' : "1-Order Filter",
'type' : 'group',
'children' : filterChild
},
{
'name' : "2-Order Filter",
'type' : 'group',
'children' : filterChild
}
]
},
#ComplexParameter(name='Custom parameter group (reciprocal values)'),
#ScalableGroup(name="Expandable Parameter Group", children=[
# {'name': 'ScalableParam 1', 'type': 'str', 'value': "default param 1"},
# {'name': 'ScalableParam 2', 'type': 'str', 'value': "default param 2"},
#]),
]
## Create tree of Parameter objects
self.p = Parameter.create(name='params', type='group', children=params)
self.t = ParameterTree()
self.t.setParameters(self.p, showTop=False)
self.layout.addWidget(self.t)
self.layout.addWidget(self.buttonBox)
## If anything changes in the tree, print a message
def change(param, changes):
print("tree changes:")
for param, change, data in changes:
path = self.p.childPath(param)
if path is not None:
childName = '.'.join(path)
else:
childName = param.name()
print(' parameter: %s'% childName)
print(' change: %s'% change)
print(' data: %s'% str(data))
print(' ----------')
self.p.sigTreeStateChanged.connect(change)
def onPressAccepted(self):
self.hide()
self.viewer.onClickedComputeFeaturesImpl(self.p)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Escape:
self.hide()
event.accept()
else:
super(QtGui.QDialog, self).keyPressEvent(event)
| timoMa/vigra | vigranumpy/examples/boundary_gui/bv_feature_selection.py | Python | mit | 3,993 |
from engine.api import API
from engine.utils.printing_utils import progressBar
from setup.utils.datastore_utils import repair_corrupt_reference, link_references_to_paper
def remove_duplicates_from_cited_by():
print("\nRemove Duplicates")
api = API()
papers = api.get_all_paper()
for i, paper in enumerate(papers):
progressBar(i, len(papers))
paper.cited_by = list(dict.fromkeys(paper.cited_by))
api.client.update_paper(paper)
def check_references():
print("\nCheck References")
api = API()
papers = api.get_all_paper()
for i, paper in enumerate(papers):
progressBar(i, len(papers))
other_papers = [p for p in papers if p.id != paper.id]
for reference in paper.references:
if not reference.get_paper_id():
continue
ref_paper = api.get_paper(reference.get_paper_id())
if ref_paper.cited_by.count(paper.id) == 0:
print()
reference.paper_id = []
api.client.update_paper(paper)
repair_corrupt_reference(reference, paper, other_papers, api)
def check_cited_by():
print("\nCheck Cited by")
api = API()
papers = api.get_all_paper()
for i, paper in enumerate(papers):
progressBar(i, len(papers))
for cited_paper_id in paper.cited_by:
if not api.contains_paper(cited_paper_id):
paper.cited_by.remove(cited_paper_id)
api.client.update_paper(paper)
continue
cited_paper = api.get_paper(cited_paper_id)
cited_paper_refs = [ref.get_paper_id() for ref in cited_paper.references if ref.get_paper_id()]
if cited_paper_refs.count(paper.id) == 0:
print()
paper.cited_by.remove(cited_paper_id)
api.client.update_paper(paper)
link_references_to_paper(cited_paper, paper, api)
def perform_checks():
check_cited_by()
remove_duplicates_from_cited_by()
check_references()
if __name__ == "__main__":
perform_checks()
exit(0)
| thomasmauerhofer/search-engine | src/setup/check_for_currupt_references.py | Python | mit | 2,124 |
from polyphony import testbench
def g(x):
if x == 0:
return 0
return 1
def h(x):
if x == 0:
pass
def f(v, i, j, k):
if i == 0:
return v
elif i == 1:
return v
elif i == 2:
h(g(j) + g(k))
return v
elif i == 3:
for m in range(j):
v += 2
return v
else:
for n in range(i):
v += 1
return v
def if28(code, r1, r2, r3, r4):
if code == 0:
return f(r1, r2, r3, r4)
return 0
@testbench
def test():
assert 1 == if28(0, 1, 1, 0, 0)
assert 2 == if28(0, 2, 0, 0, 0)
assert 3 == if28(0, 3, 1, 0, 0)
assert 4 == if28(0, 4, 2, 0, 0)
assert 5 == if28(0, 5, 2, 1, 1)
assert 6 == if28(0, 6, 2, 2, 2)
assert 7 == if28(0, 7, 3, 0, 0)
assert 10 == if28(0, 8, 3, 1, 1)
assert 13 == if28(0, 9, 3, 2, 2)
assert 14 == if28(0, 10, 4, 0, 0)
test()
| ktok07b6/polyphony | tests/if/if28.py | Python | mit | 922 |
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
from evaluation_helper import *
cls_names = g_shape_names
img_name_file_list = [os.path.join(g_real_images_voc12val_det_bbox_folder, name+'.txt') for name in cls_names]
det_bbox_mat_file_list = [os.path.join(g_detection_results_folder, x.rstrip()) for x in open(g_rcnn_detection_bbox_mat_filelist)]
result_folder = os.path.join(BASE_DIR, 'avp_test_results')
test_avp_nv(cls_names, img_name_file_list, det_bbox_mat_file_list, result_folder)
img_name_file_list = [os.path.join(g_real_images_voc12val_easy_gt_bbox_folder, name+'.txt') for name in cls_names]
view_label_folder = g_real_images_voc12val_easy_gt_bbox_folder
result_folder = os.path.join(BASE_DIR, 'vp_test_results')
test_vp_acc(cls_names, img_name_file_list, result_folder, view_label_folder)
| ShapeNet/RenderForCNN | view_estimation/run_evaluation.py | Python | mit | 932 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Theo Crevon
# Copyright (c) 2013, Greg Leclercq
#
# See the file LICENSE for copying permission.
from boto.swf.exceptions import SWFResponseError
from swf.constants import REGISTERED
from swf.querysets.base import BaseQuerySet
from swf.models import Domain
from swf.models.workflow import (WorkflowType, WorkflowExecution,
CHILD_POLICIES)
from swf.utils import datetime_timestamp, past_day, get_subkey
from swf.exceptions import (ResponseError, DoesNotExistError,
InvalidKeywordArgumentError, AlreadyExistsError)
class BaseWorkflowQuerySet(BaseQuerySet):
"""Base domain bounded workflow queryset objects
Amazon workflows types and executions are always bounded
to a specific domain: so any queryset which means to deal
with workflows has to be built against a `domain`
:param domain: domain the inheriting queryset belongs to
:type domain: swf.model.domain.Domain
"""
# Amazon response section corresponding
    # to the current queryset information
_infos = 'typeInfo'
_infos_plural = 'typeInfos'
def __init__(self, domain, *args, **kwargs):
super(BaseWorkflowQuerySet, self).__init__(*args, **kwargs)
Domain.check(domain)
self.domain = domain
@property
def domain(self):
if not hasattr(self, '_domain'):
self._domain = None
return self._domain
@domain.setter
def domain(self, value):
# Avoiding circular import
from swf.models.domain import Domain
if not isinstance(value, Domain):
err = "domain property has to be of"\
"swf.model.domain.Domain type, not %r"\
% type(value)
raise TypeError(err)
self._domain = value
def _list(self, *args, **kwargs):
raise NotImplementedError
def _list_items(self, *args, **kwargs):
response = {'nextPageToken': None}
while 'nextPageToken' in response:
response = self._list(
*args,
next_page_token=response['nextPageToken'],
**kwargs
)
for item in response[self._infos_plural]:
yield item
class WorkflowTypeQuerySet(BaseWorkflowQuerySet):
# Explicit is better than implicit, keep zen
_infos = 'typeInfo'
_infos_plural = 'typeInfos'
def to_WorkflowType(self, domain, workflow_info, **kwargs):
        # Not using get_subkey in order for it to explicitly
# raise when workflowType name doesn't exist for example
return WorkflowType(
domain,
workflow_info['workflowType']['name'],
workflow_info['workflowType']['version'],
status=workflow_info['status'],
**kwargs
)
def get(self, name, version, *args, **kwargs):
"""Fetches the Workflow Type with `name` and `version`
:param name: name of the workflow type
:type name: String
:param version: workflow type version
:type version: String
:returns: matched workflow type instance
:rtype: swf.core.model.workflow.WorkflowType
A typical Amazon response looks like:
.. code-block:: json
{
"configuration": {
"defaultExecutionStartToCloseTimeout": "300",
"defaultTaskStartToCloseTimeout": "300",
"defaultTaskList": {
"name": "None"
},
"defaultChildPolicy": "TERMINATE"
},
"typeInfo": {
"status": "REGISTERED",
"creationDate": 1364492094.968,
"workflowType": {
"version": "1",
"name": "testW"
}
}
}
"""
try:
response = self.connection.describe_workflow_type(self.domain.name, name, version)
except SWFResponseError as e:
if e.error_code == 'UnknownResourceFault':
raise DoesNotExistError(e.body['message'])
raise ResponseError(e.body['message'])
wt_info = response[self._infos]
wt_config = response['configuration']
task_list = kwargs.get('task_list')
if task_list is None:
task_list = get_subkey(wt_config, ['defaultTaskList', 'name'])
child_policy = kwargs.get('child_policy')
if child_policy is None:
child_policy = wt_config.get('defaultChildPolicy')
decision_task_timeout = kwargs.get('decision_task_timeout')
if decision_task_timeout is None:
decision_task_timeout = wt_config.get(
'defaultTaskStartToCloseTimeout')
execution_timeout = kwargs.get('execution_timeout')
if execution_timeout is None:
execution_timeout = wt_config.get(
'defaultExecutionStartToCloseTimeout')
decision_tasks_timeout = kwargs.get('decision_tasks_timeout')
if decision_tasks_timeout is None:
decision_tasks_timeout = wt_config.get(
'defaultTaskStartToCloseTimeout')
return self.to_WorkflowType(
self.domain,
wt_info,
task_list=task_list,
child_policy=child_policy,
execution_timeout=execution_timeout,
decision_tasks_timeout=decision_tasks_timeout,
)
def get_or_create(self, name, version,
status=REGISTERED,
creation_date=0.0,
deprecation_date=0.0,
task_list=None,
child_policy=CHILD_POLICIES.TERMINATE,
execution_timeout='300',
decision_tasks_timeout='300',
description=None,
*args, **kwargs):
"""Fetches, or creates the ActivityType with ``name`` and ``version``
        When trying to fetch a matching workflow type, only the name and
        version parameters are taken into account.
        If the workflow type has to be created, and you want it created
        with specific values, provide them explicitly.
:param name: name of the workflow type
:type name: String
:param version: workflow type version
:type version: String
:param status: workflow type status
:type status: swf.core.ConnectedSWFObject.{REGISTERED, DEPRECATED}
:param creation_date: creation date of the current WorkflowType
:type creation_date: float (timestamp)
:param deprecation_date: deprecation date of WorkflowType
:type deprecation_date: float (timestamp)
:param task_list: task list to use for scheduling decision tasks for executions
of this workflow type
:type task_list: String
:param child_policy: policy to use for the child workflow executions
when a workflow execution of this type is terminated
:type child_policy: CHILD_POLICIES.{TERMINATE |
REQUEST_CANCEL |
ABANDON}
:param execution_timeout: maximum duration for executions of this workflow type
:type execution_timeout: String
:param decision_tasks_timeout: maximum duration of decision tasks for this workflow type
:type decision_tasks_timeout: String
:param description: Textual description of the workflow type
:type description: String
:returns: Fetched or created WorkflowType model object
:rtype: WorkflowType
"""
try:
return self.get(name,
version,
task_list=task_list,
child_policy=child_policy,
execution_timeout=execution_timeout,
decision_tasks_timeout=decision_tasks_timeout)
except DoesNotExistError:
try:
return self.create(
name,
version,
status=status,
creation_date=creation_date,
deprecation_date=deprecation_date,
task_list=task_list,
child_policy=child_policy,
execution_timeout=execution_timeout,
decision_tasks_timeout=decision_tasks_timeout,
description=description,
)
            # a race condition can happen if two workflows try to register the same type
except AlreadyExistsError:
return self.get(name,
version,
task_list=task_list,
child_policy=child_policy,
execution_timeout=execution_timeout,
decision_tasks_timeout=decision_tasks_timeout)
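    # Example (sketch): fetching-or-registering a workflow type through this
    # queryset; the domain and type names are illustrative:
    #
    #   domain = Domain("TestDomain")
    #   qs = WorkflowTypeQuerySet(domain)
    #   crawl_type = qs.get_or_create("Crawl", "1", task_list="crawl-tasks")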
def _list(self, *args, **kwargs):
return self.connection.list_workflow_types(*args, **kwargs)
def filter(self, domain=None,
registration_status=REGISTERED,
name=None,
*args, **kwargs):
"""Filters workflows based on the ``domain`` they belong to,
their ``status``, and/or their ``name``
:param domain: domain the workflow type belongs to
:type domain: swf.models.domain.Domain
:param registration_status: workflow type registration status to match,
Valid values are:
* ``swf.constants.REGISTERED``
* ``swf.constants.DEPRECATED``
:type registration_status: string
:param name: workflow type name to match
:type name: string
:returns: list of matched WorkflowType models objects
:rtype: list
"""
        # As a WorkflowTypeQuerySet is already built against a specific
        # domain, the domain filter here is optional.
domain = domain or self.domain
return [self.to_WorkflowType(domain, wf) for wf in
self._list_items(domain.name, registration_status, name=name)]
def all(self, registration_status=REGISTERED, *args, **kwargs):
"""Retrieves every Workflow types
:param registration_status: workflow type registration status to match,
Valid values are:
* ``swf.constants.REGISTERED``
* ``swf.constants.DEPRECATED``
:type registration_status: string
A typical Amazon response looks like:
.. code-block:: json
{
"typeInfos": [
{
"status": "REGISTERED",
"creationDate": 1364293450.67,
"description": "",
"workflowType": {
"version": "1",
"name": "Crawl"
}
},
{
"status": "REGISTERED",
"creationDate": 1364492094.968,
"workflowType": {
"version": "1",
"name": "testW"
}
}
]
}
"""
return self.filter(registration_status=registration_status)
def create(self, name, version,
status=REGISTERED,
creation_date=0.0,
deprecation_date=0.0,
task_list=None,
child_policy=CHILD_POLICIES.TERMINATE,
execution_timeout='300',
decision_tasks_timeout='300',
description=None,
*args, **kwargs):
"""Creates a new remote workflow type and returns the
created WorkflowType model instance.
:param name: name of the workflow type
:type name: String
:param version: workflow type version
:type version: String
:param status: workflow type status
:type status: swf.core.ConnectedSWFObject.{REGISTERED, DEPRECATED}
:param creation_date: creation date of the current WorkflowType
:type creation_date: float (timestamp)
:param deprecation_date: deprecation date of WorkflowType
:type deprecation_date: float (timestamp)
:param task_list: task list to use for scheduling decision tasks for executions
of this workflow type
:type task_list: String
:param child_policy: policy to use for the child workflow executions
when a workflow execution of this type is terminated
:type child_policy: CHILD_POLICIES.{TERMINATE |
REQUEST_CANCEL |
ABANDON}
:param execution_timeout: maximum duration for executions of this workflow type
:type execution_timeout: String
:param decision_tasks_timeout: maximum duration of decision tasks for this workflow type
:type decision_tasks_timeout: String
:param description: Textual description of the workflow type
:type description: String
"""
workflow_type = WorkflowType(
self.domain,
name,
version,
status=status,
creation_date=creation_date,
deprecation_date=deprecation_date,
task_list=task_list,
child_policy=child_policy,
execution_timeout=execution_timeout,
decision_tasks_timeout=decision_tasks_timeout,
description=description
)
workflow_type.save()
return workflow_type
class WorkflowExecutionQuerySet(BaseWorkflowQuerySet):
"""Fetches Workflow executions"""
_infos = 'executionInfo'
_infos_plural = 'executionInfos'
def _is_valid_status_param(self, status, param):
statuses = {
WorkflowExecution.STATUS_OPEN: set([
'oldest_date',
'latest_date'],
),
WorkflowExecution.STATUS_CLOSED: set([
'start_latest_date',
'start_oldest_date',
'close_latest_date',
'close_oldest_date',
'close_status'
]),
}
return param in statuses.get(status, set())
def _validate_status_parameters(self, status, params):
return [param for param in params if
not self._is_valid_status_param(status, param)]
def list_workflow_executions(self, status, *args, **kwargs):
statuses = {
WorkflowExecution.STATUS_OPEN: 'open',
WorkflowExecution.STATUS_CLOSED: 'closed',
}
        # boto.swf.list_closed_workflow_executions expects a mandatory
        # `start_oldest_date` kwarg, whereas boto.swf.list_open_workflow_executions
        # expects a mandatory `oldest_date` kwarg.
if status == WorkflowExecution.STATUS_OPEN:
kwargs['oldest_date'] = kwargs.pop('start_oldest_date')
try:
method = 'list_{}_workflow_executions'.format(statuses[status])
return getattr(self.connection, method)(*args, **kwargs)
except KeyError:
raise ValueError("Unknown status provided: %s" % status)
def get_workflow_type(self, execution_info):
workflow_type = execution_info['workflowType']
workflow_type_qs = WorkflowTypeQuerySet(self.domain)
return workflow_type_qs.get(
workflow_type['name'],
workflow_type['version'],
)
def to_WorkflowExecution(self, domain, execution_info, **kwargs):
workflow_type = WorkflowType(
self.domain,
execution_info['workflowType']['name'],
execution_info['workflowType']['version']
)
return WorkflowExecution(
domain,
get_subkey(execution_info, ['execution', 'workflowId']), # workflow_id
run_id=get_subkey(execution_info, ['execution', 'runId']),
workflow_type=workflow_type,
status=execution_info.get('executionStatus'),
close_status=execution_info.get('closeStatus'),
tag_list=execution_info.get('tagList'),
start_timestamp=execution_info.get('startTimestamp'),
close_timestamp=execution_info.get('closeTimestamp'),
cancel_requested=execution_info.get('cancelRequested'),
parent=execution_info.get('parent'),
**kwargs
)
def get(self, workflow_id, run_id, *args, **kwargs):
""" """
try:
response = self.connection.describe_workflow_execution(
self.domain.name,
run_id,
workflow_id)
except SWFResponseError as e:
if e.error_code == 'UnknownResourceFault':
raise DoesNotExistError(e.body['message'])
raise ResponseError(e.body['message'])
execution_info = response[self._infos]
execution_config = response['executionConfiguration']
return self.to_WorkflowExecution(
self.domain,
execution_info,
task_list=get_subkey(execution_config, ['taskList', 'name']),
child_policy=execution_config.get('childPolicy'),
execution_timeout=execution_config.get('executionStartToCloseTimeout'),
decision_tasks_timeout=execution_config.get('taskStartToCloseTimeout'),
latest_activity_task_timestamp=response.get('latestActivityTaskTimestamp'),
latest_execution_context=response.get('latestExecutionContext'),
open_counts=response['openCounts'],
)
def filter(self,
status=WorkflowExecution.STATUS_OPEN, tag=None,
workflow_id=None, workflow_type_name=None,
workflow_type_version=None,
*args, **kwargs):
"""Filters workflow executions based on kwargs provided criteras
:param status: workflow executions with provided status will be kept.
Valid values are:
* ``swf.models.WorkflowExecution.STATUS_OPEN``
* ``swf.models.WorkflowExecution.STATUS_CLOSED``
:type status: string
:param tag: workflow executions containing the tag will be kept
:type tag: String
:param workflow_id: workflow executions attached to the id will be kept
:type workflow_id: String
:param workflow_type_name: workflow executions attached to the workflow type
with provided name will be kept
:type workflow_type_name: String
:param workflow_type_version: workflow executions attached to the workflow type
of the provided version will be kept
:type workflow_type_version: String
        **Be aware that** querying by ``status`` enables the usage of
        status-specific kwargs
        * STATUS_OPEN
            :param oldest_date: oldest start date of the executions to keep (in days)
            :type oldest_date: int
            :param latest_date: latest start date of the executions to keep (in days)
            :type latest_date: int
* STATUS_CLOSED
:param start_latest_date: workflow executions that meet the start time criteria
of the filter are kept (in days)
:type start_latest_date: int
:param start_oldest_date: workflow executions that meet the start time criteria
of the filter are kept (in days)
:type start_oldest_date: int
:param close_latest_date: workflow executions that meet the close time criteria
of the filter are kept (in days)
:type close_latest_date: int
:param close_oldest_date: workflow executions that meet the close time criteria
of the filter are kept (in days)
:type close_oldest_date: int
:param close_status: must match the close status of an execution for it
to meet the criteria of this filter.
Valid values are:
* ``CLOSE_STATUS_COMPLETED``
* ``CLOSE_STATUS_FAILED``
* ``CLOSE_STATUS_CANCELED``
* ``CLOSE_STATUS_TERMINATED``
* ``CLOSE_STATUS_CONTINUED_AS_NEW``
                * ``CLOSE_STATUS_TIMED_OUT``
:type close_status: string
:returns: workflow executions objects list
:rtype: list
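        Example (an illustrative sketch; ``qs`` stands for a
        ``WorkflowExecutionQuerySet``):
        .. code-block:: python
            # failed executions closed within the last 5 days
            failed = qs.filter(
                status=WorkflowExecution.STATUS_CLOSED,
                start_oldest_date=5,
                close_status=WorkflowExecution.CLOSE_STATUS_FAILED,
            )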
"""
        # Executions are always listed against the queryset's own domain;
        # only the status-specific kwargs need validating here.
invalid_kwargs = self._validate_status_parameters(status, kwargs)
if invalid_kwargs:
err_msg = 'Invalid keyword arguments supplied: {}'.format(
', '.join(invalid_kwargs))
raise InvalidKeywordArgumentError(err_msg)
if status == WorkflowExecution.STATUS_OPEN:
oldest_date = kwargs.pop('oldest_date', 30)
else:
# The SWF docs on ListClosedWorkflowExecutions state that:
#
# "startTimeFilter and closeTimeFilter are mutually exclusive"
#
# so we must figure out if we have to add a default value for
# start_oldest_date or not.
if "close_latest_date" in kwargs or "close_oldest_date" in kwargs:
default_oldest_date = None
else:
default_oldest_date = 30
oldest_date = kwargs.pop('start_oldest_date', default_oldest_date)
# Compute a timestamp from the delta in days we got from params
        # If oldest_date is empty at this point, it is intentional: leave it
        # unset and assume the user provided another time filter.
if oldest_date:
start_oldest_date = int(datetime_timestamp(past_day(oldest_date)))
else:
start_oldest_date = None
return [self.to_WorkflowExecution(self.domain, wfe) for wfe in
self._list_items(
*args,
domain=self.domain.name,
status=status,
workflow_id=workflow_id,
workflow_name=workflow_type_name,
workflow_version=workflow_type_version,
start_oldest_date=start_oldest_date,
tag=tag,
**kwargs
)]
def _list(self, *args, **kwargs):
return self.list_workflow_executions(*args, **kwargs)
def all(self, status=WorkflowExecution.STATUS_OPEN,
start_oldest_date=30,
*args, **kwargs):
"""Fetch every workflow executions during the last `start_oldest_date`
days, with `status`
:param status: Workflow executions status filter
:type status: swf.models.WorkflowExecution.{STATUS_OPEN, STATUS_CLOSED}
:param start_oldest_date: Specifies the oldest start/close date to return.
:type start_oldest_date: integer (days)
:returns: workflow executions objects list
:rtype: list
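        Example (an illustrative sketch):
        .. code-block:: python
            week_old_open = qs.all(status=WorkflowExecution.STATUS_OPEN,
                                   start_oldest_date=7)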
        A typical Amazon response looks like:
.. code-block:: json
{
"executionInfos": [
{
"cancelRequested": "boolean",
"closeStatus": "string",
"closeTimestamp": "number",
"execution": {
"runId": "string",
"workflowId": "string"
},
"executionStatus": "string",
"parent": {
"runId": "string",
"workflowId": "string"
},
"startTimestamp": "number",
"tagList": [
"string"
],
"workflowType": {
"name": "string",
"version": "string"
}
}
],
"nextPageToken": "string"
}
"""
start_oldest_date = datetime_timestamp(past_day(start_oldest_date))
return [self.to_WorkflowExecution(self.domain, wfe) for wfe
in self._list_items(
status,
self.domain.name,
start_oldest_date=int(start_oldest_date))]
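# Illustrative end-to-end sketch (assumptions: boto credentials are available
# in the environment and an SWF domain named "TestDomain" exists; both are
# placeholders, and the queryset constructor takes a Domain instance as used
# throughout this module):
#
#   from swf.models.domain import Domain
#
#   domain = Domain("TestDomain")
#   qs = WorkflowExecutionQuerySet(domain)
#   open_executions = qs.filter(status=WorkflowExecution.STATUS_OPEN)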
| botify-labs/python-simple-workflow | swf/querysets/workflow.py | Python | mit | 25,485 |
import tensorflow as tf
from ocnn import *
# octree-based resnet55
def network_resnet(octree, flags, training=True, reuse=None):
depth = flags.depth
  channels = [2048, 1024, 512, 256, 128, 64, 32, 16, 8]  # channels[d]: width at octree depth d
with tf.variable_scope("ocnn_resnet", reuse=reuse):
data = octree_property(octree, property_name="feature", dtype=tf.float32,
depth=depth, channel=flags.channel)
data = tf.reshape(data, [1, flags.channel, -1, 1])
with tf.variable_scope("conv1"):
data = octree_conv_bn_relu(data, octree, depth, channels[depth], training)
for d in range(depth, 2, -1):
for i in range(0, flags.resblock_num):
with tf.variable_scope('resblock_%d_%d' % (d, i)):
data = octree_resblock(data, octree, d, channels[d], 1, training)
with tf.variable_scope('max_pool_%d' % d):
data, _ = octree_max_pool(data, octree, d)
with tf.variable_scope("global_average"):
data = octree_full_voxel(data, depth=2)
data = tf.reduce_mean(data, 2)
if flags.dropout[0]:
data = tf.layers.dropout(data, rate=0.5, training=training)
with tf.variable_scope("fc2"):
logit = dense(data, flags.nout, use_bias=True)
return logit
# the ocnn in the paper
def network_ocnn(octree, flags, training=True, reuse=None):
depth = flags.depth
  channels = [512, 256, 128, 64, 32, 16, 8, 4, 2]  # channels[d]: width at octree depth d
with tf.variable_scope("ocnn", reuse=reuse):
data = octree_property(octree, property_name="feature", dtype=tf.float32,
depth=depth, channel=flags.channel)
data = tf.reshape(data, [1, flags.channel, -1, 1])
for d in range(depth, 2, -1):
with tf.variable_scope('depth_%d' % d):
data = octree_conv_bn_relu(data, octree, d, channels[d], training)
data, _ = octree_max_pool(data, octree, d)
with tf.variable_scope("full_voxel"):
data = octree_full_voxel(data, depth=2)
data = tf.layers.dropout(data, rate=0.5, training=training)
with tf.variable_scope("fc1"):
data = fc_bn_relu(data, channels[2], training=training)
data = tf.layers.dropout(data, rate=0.5, training=training)
with tf.variable_scope("fc2"):
logit = dense(data, flags.nout, use_bias=True)
return logit
def cls_network(octree, flags, training, reuse=False):
if flags.name.lower() == 'ocnn':
return network_ocnn(octree, flags, training, reuse)
elif flags.name.lower() == 'resnet':
return network_resnet(octree, flags, training, reuse)
else:
    raise ValueError('Error, no network: ' + flags.name)
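# Illustrative usage sketch (assumptions: `octree` is a batched octree tensor
# coming from the O-CNN input pipeline, and `flags` carries the fields read
# above -- name, depth, channel, nout, resblock_num, dropout):
#
#   logit = cls_network(octree, flags, training=True, reuse=False)
#   probs = tf.nn.softmax(logit)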
| microsoft/O-CNN | tensorflow/script/network_cls.py | Python | mit | 2,557 |
#!/usr/bin/env python
from distutils.core import setup
from dangagearman import __version__ as version
setup(
name = 'danga-gearman',
version = version,
description = 'Client for the Danga (Perl) Gearman implementation',
author = 'Samuel Stauffer',
author_email = '[email protected]',
url = 'http://github.com/saymedia/python-danga-gearman/tree/master',
packages = ['dangagearman'],
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
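# Typical distutils workflow for this script (illustrative):
#   python setup.py sdist      # build a source distribution
#   python setup.py install    # install into the active environment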
| saymedia/python-danga-gearman | setup.py | Python | mit | 699 |
#!/usr/bin/env python
from __future__ import print_function
import sys
import re
from utils import CDNEngine
from utils import request
if sys.version_info >= (3, 0):
import subprocess as commands
import urllib.parse as urlparse
else:
import commands
import urlparse
def detect(hostname):
"""
Performs CDN detection thanks to information disclosure from server error.
Parameters
----------
hostname : str
Hostname to assess
"""
print('[+] Error server detection\n')
hostname = urlparse.urlparse(hostname).netloc
    regexp = re.compile(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b')
out = commands.getoutput("host " + hostname)
addresses = regexp.finditer(out)
for addr in addresses:
res = request.do('http://' + addr.group())
if res is not None and res.status_code == 500:
CDNEngine.find(res.text.lower())
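# Illustrative usage sketch (the URL is a placeholder; `detect` expects a full
# URL because the hostname is extracted with urlparse):
#
#   detect('http://example.com')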
| Nitr4x/whichCDN | plugins/ErrorServerDetection/behaviors.py | Python | mit | 907 |
"""
=================================================
Modeling quasi-seasonal trends with date features
=================================================
Some trends are common enough to appear seasonal, yet sporadic enough that
approaching them from a seasonal perspective may not be valid. An example of
this is the `"end-of-the-month" effect <https://robjhyndman.com/hyndsight/monthly-seasonality/>`_.
In this example, we'll explore how we can create meaningful features that
express seasonal trends without needing to fit a seasonal model.
.. raw:: html
<br/>
"""
print(__doc__)
# Author: Taylor Smith <[email protected]>
import pmdarima as pm
from pmdarima import arima
from pmdarima import model_selection
from pmdarima import pipeline
from pmdarima import preprocessing
from pmdarima.datasets._base import load_date_example
import numpy as np
from matplotlib import pyplot as plt
print(f"pmdarima version: {pm.__version__}")
# Load the data and split it into separate pieces
y, X = load_date_example()
y_train, y_test, X_train, X_test = \
model_selection.train_test_split(y, X, test_size=20)
# We can examine traits about the time series:
pm.tsdisplay(y_train, lag_max=10)
# We can see the ACF increases and decreases rather rapidly, which means we may
# need some differencing. There also does not appear to be an obvious seasonal
# trend.
n_diffs = arima.ndiffs(y_train, max_d=5)
# Here's what the featurizer will create for us:
date_feat = preprocessing.DateFeaturizer(
column_name="date", # the name of the date feature in the X matrix
with_day_of_week=True,
with_day_of_month=True)
_, X_train_feats = date_feat.fit_transform(y_train, X_train)
print(f"Head of generated X features:\n{repr(X_train_feats.head())}")
# We can plug this X featurizer into a pipeline:
pipe = pipeline.Pipeline([
('date', date_feat),
('arima', arima.AutoARIMA(d=n_diffs,
trace=3,
stepwise=True,
suppress_warnings=True,
seasonal=False))
])
pipe.fit(y_train, X_train)
# Plot our forecasts
forecasts = pipe.predict(X=X_test)
fig = plt.figure(figsize=(16, 8))
ax = fig.add_subplot(1, 1, 1)
n_train = y_train.shape[0]
x = np.arange(n_train + forecasts.shape[0])
ax.plot(x[:n_train], y_train, color='blue', label='Training Data')
ax.plot(x[n_train:], forecasts, color='green', marker='o',
label='Predicted')
ax.plot(x[n_train:], y_test, color='red', label='Actual')
ax.legend(loc='lower left', borderaxespad=0.5)
ax.set_title('Predicted Foo')
ax.set_ylabel('# Foo')
plt.show()
# What next? Try combining different featurizers in your pipeline to enhance
# a model's predictive power.
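# For instance, the date features can be combined with Fourier terms in the
# same pipeline (an illustrative sketch; m=7 assumes a weekly pattern and is
# a placeholder):
#
# pipe = pipeline.Pipeline([
#     ('date', preprocessing.DateFeaturizer(column_name="date")),
#     ('fourier', preprocessing.FourierFeaturizer(m=7)),
#     ('arima', arima.AutoARIMA(d=n_diffs, stepwise=True,
#                               suppress_warnings=True, seasonal=False))
# ])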
| tgsmith61591/pyramid | examples/preprocessing/example_date_featurizer.py | Python | mit | 2,754 |
import json
from chargebee.model import Model
from chargebee import request
from chargebee import APIError
class Plan(Model):
class Tier(Model):
fields = ["starting_unit", "ending_unit", "price", "starting_unit_in_decimal", "ending_unit_in_decimal", "price_in_decimal"]
pass
class ApplicableAddon(Model):
fields = ["id"]
pass
class AttachedAddon(Model):
fields = ["id", "quantity", "billing_cycles", "type", "quantity_in_decimal"]
pass
class EventBasedAddon(Model):
fields = ["id", "quantity", "on_event", "charge_once", "quantity_in_decimal"]
pass
fields = ["id", "name", "invoice_name", "description", "price", "currency_code", "period", \
"period_unit", "trial_period", "trial_period_unit", "trial_end_action", "pricing_model", "charge_model", \
"free_quantity", "setup_cost", "downgrade_penalty", "status", "archived_at", "billing_cycles", \
"redirect_url", "enabled_in_hosted_pages", "enabled_in_portal", "addon_applicability", "tax_code", \
"hsn_code", "taxjar_product_code", "avalara_sale_type", "avalara_transaction_type", "avalara_service_type", \
"sku", "accounting_code", "accounting_category1", "accounting_category2", "accounting_category3", \
"accounting_category4", "is_shippable", "shipping_frequency_period", "shipping_frequency_period_unit", \
"resource_version", "updated_at", "giftable", "claim_url", "free_quantity_in_decimal", "price_in_decimal", \
"invoice_notes", "taxable", "tax_profile_id", "meta_data", "tiers", "applicable_addons", "attached_addons", \
"event_based_addons", "show_description_in_invoices", "show_description_in_quotes"]
@staticmethod
def create(params, env=None, headers=None):
return request.send('post', request.uri_path("plans"), params, env, headers)
@staticmethod
def update(id, params=None, env=None, headers=None):
return request.send('post', request.uri_path("plans",id), params, env, headers)
@staticmethod
def list(params=None, env=None, headers=None):
return request.send_list_request('get', request.uri_path("plans"), params, env, headers)
@staticmethod
def retrieve(id, env=None, headers=None):
return request.send('get', request.uri_path("plans",id), None, env, headers)
@staticmethod
def delete(id, env=None, headers=None):
return request.send('post', request.uri_path("plans",id,"delete"), None, env, headers)
@staticmethod
def copy(params, env=None, headers=None):
return request.send('post', request.uri_path("plans","copy"), params, env, headers)
@staticmethod
def unarchive(id, env=None, headers=None):
return request.send('post', request.uri_path("plans",id,"unarchive"), None, env, headers)
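# Illustrative usage sketch (the site name, API key and plan values are
# placeholders):
#
#   import chargebee
#   chargebee.configure("your_api_key", "your-site")
#   result = chargebee.Plan.create({
#       "id": "basic",
#       "name": "Basic",
#       "price": 900,
#   })
#   plan = result.plan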
| chargebee/chargebee-python | chargebee/models/plan.py | Python | mit | 2,784 |