{
"source": "jmikedupont2/pyannotate",
"score": 2
}
#### File: pyannotate_tools/fixes/fix_annotate_json.py
```python
from __future__ import print_function
import json # noqa
import os
import re
from lib2to3.fixer_util import syms, touch_import
from lib2to3.pgen2 import token
from lib2to3.pytree import Base, Leaf, Node
from typing import __all__ as typing_all # type: ignore
from typing import Any, Dict, List, Optional, Tuple
try:
from typing import Text
except ImportError:
# In Python 3.5.1 stdlib, typing.py does not define Text
Text = str # type: ignore
from .fix_annotate import FixAnnotate
# Taken from mypy codebase:
# https://github.com/python/mypy/blob/745d300b8304c3dcf601477762bf9d70b9a4619c/mypy/main.py#L503
PY_EXTENSIONS = ['.pyi', '.py']
def crawl_up(arg):
# type: (str) -> Tuple[str, str]
"""Given a .py[i] filename, return (root directory, module).
We crawl up the path until we find a directory without
__init__.py[i], or until we run out of path components.
"""
dir, mod = os.path.split(arg)
mod = strip_py(mod) or mod
cwd = os.getcwd()
while dir and get_init_file(dir):
if cwd == dir:
# we are somewhere in the tree and our json file is here
break
dir, base = os.path.split(dir)
if not base:
break
if mod == '__init__' or not mod:
mod = base
else:
mod = base + '.' + mod
return dir, mod
def strip_py(arg):
# type: (str) -> Optional[str]
"""Strip a trailing .py or .pyi suffix.
Return None if no such suffix is found.
"""
for ext in PY_EXTENSIONS:
if arg.endswith(ext):
return arg[:-len(ext)]
return None
def get_init_file(dir):
# type: (str) -> Optional[str]
"""Check whether a directory contains a file named __init__.py[i].
If so, return the file's name (with dir prefixed). If not, return
None.
This prefers .pyi over .py (because of the ordering of PY_EXTENSIONS).
"""
for ext in PY_EXTENSIONS:
f = os.path.join(dir, '__init__' + ext)
if os.path.isfile(f):
return f
return None
def get_funcname(name, node):
# type: (Leaf, Node) -> Text
"""Get function name by the following rules:
- function -> function_name
- instance method -> ClassName.function_name
"""
funcname = name.value
if node.parent and node.parent.parent:
grand = node.parent.parent
if grand.type == syms.classdef:
grandname = grand.children[1]
            assert grandname.type == token.NAME, repr(grandname)
assert isinstance(grandname, Leaf) # Same as previous, for mypy
funcname = grandname.value + '.' + funcname
return funcname
def count_args(node, results):
# type: (Node, Dict[str, Base]) -> Tuple[int, bool, bool, bool]
"""Count arguments and check for self and *args, **kwds.
    Return (count, selfish, star, starstar) where:
- count is total number of args (including *args, **kwds)
- selfish is True if the initial arg is named 'self' or 'cls'
- star is True iff *args is found
- starstar is True iff **kwds is found
"""
count = 0
selfish = False
star = False
starstar = False
args = results.get('args')
if isinstance(args, Node):
children = args.children
elif isinstance(args, Leaf):
children = [args]
else:
children = []
# Interpret children according to the following grammar:
# (('*'|'**')? NAME ['=' expr] ','?)*
skip = False
previous_token_is_star = False
for child in children:
if skip:
skip = False
elif isinstance(child, Leaf):
# A single '*' indicates the rest of the arguments are keyword only
# and shouldn't be counted as a `*`.
if child.type == token.STAR:
previous_token_is_star = True
elif child.type == token.DOUBLESTAR:
starstar = True
elif child.type == token.NAME:
if count == 0:
if child.value in ('self', 'cls'):
selfish = True
count += 1
if previous_token_is_star:
star = True
elif child.type == token.EQUAL:
skip = True
if child.type != token.STAR:
previous_token_is_star = False
return count, selfish, star, starstar
class FixAnnotateJson(FixAnnotate):
needed_imports = None
def add_import(self, mod, name):
if mod == self.current_module():
return
if self.needed_imports is None:
self.needed_imports = set()
self.needed_imports.add((mod, name))
def patch_imports(self, types, node):
if self.needed_imports:
for mod, name in sorted(self.needed_imports):
touch_import(mod, name, node)
self.needed_imports = None
def current_module(self):
# TODO: cache this?
filename = self.filename
if filename.endswith('.py'):
filename = filename[:-3]
parts = filename.split(os.sep)
if parts[-1] == '__init__':
del parts[-1]
if parts[0] == '.':
del parts[0]
return '.'.join(parts)
def make_annotation(self, node, results):
name = results['name']
assert isinstance(name, Leaf), repr(name)
assert name.type == token.NAME, repr(name)
funcname = get_funcname(name, node)
res = self.get_annotation_from_stub(node, results, funcname)
return res
stub_json_file = os.getenv('TYPE_COLLECTION_JSON')
# JSON data for the current file
stub_json = None # type: List[Dict[str, Any]]
@classmethod
def init_stub_json_from_data(cls, data, filename):
cls.stub_json = data
cls.top_dir, _ = crawl_up(os.path.abspath(filename))
def init_stub_json(self):
with open(self.__class__.stub_json_file) as f:
data = json.load(f)
self.__class__.init_stub_json_from_data(data, self.filename)
def get_annotation_from_stub(self, node, results, funcname):
if not self.__class__.stub_json:
self.init_stub_json()
data = self.__class__.stub_json
# We are using relative paths in the JSON.
items = [it for it in data
if it['func_name'] == funcname and
(os.path.join(self.__class__.top_dir, it['path']) ==
os.path.abspath(self.filename))]
if len(items) > 1:
# this can happen, because of
# 1) nested functions
# 2) method decorators
# as a cheap and dirty solution we just return the nearest one by the line number
# (keep the commented-out log_message call in case we need to come back to this)
## self.log_message("%s:%d: duplicate signatures for %s (at lines %s)" %
## (items[0]['path'], node.get_lineno(), items[0]['func_name'],
## ", ".join(str(it['line']) for it in items)))
items.sort(key=lambda it: abs(node.get_lineno() - it['line']))
if items:
it = items[0]
# If the line number is too far off, the source probably drifted
# since the trace was collected; it's better to skip this node.
# (Allow some drift, since decorators also cause an offset.)
if abs(node.get_lineno() - it['line']) >= 5:
self.log_message("%s:%d: '%s' signature from line %d too far away -- skipping" %
(self.filename, node.get_lineno(), it['func_name'], it['line']))
return None
if 'signature' in it:
sig = it['signature']
arg_types = sig['arg_types']
# Passes 1-2 don't always understand *args or **kwds,
# so add '*Any' or '**Any' at the end if needed.
count, selfish, star, starstar = count_args(node, results)
for arg_type in arg_types:
if arg_type.startswith('**'):
starstar = False
elif arg_type.startswith('*'):
star = False
if star:
arg_types.append('*Any')
if starstar:
arg_types.append('**Any')
# Pass 1 omits the first arg iff it's named 'self' or 'cls',
# even if it's not a method, so insert `Any` as needed
# (but only if it's not actually a method).
if selfish and len(arg_types) == count - 1:
if self.is_method(node):
count -= 1 # Leave out the type for 'self' or 'cls'
else:
arg_types.insert(0, 'Any')
# If after those adjustments the count is still off,
# print a warning and skip this node.
if len(arg_types) != count:
self.log_message("%s:%d: source has %d args, annotation has %d -- skipping" %
(self.filename, node.get_lineno(), count, len(arg_types)))
return None
ret_type = sig['return_type']
arg_types = [self.update_type_names(arg_type) for arg_type in arg_types]
# Avoid common error "No return value expected"
if ret_type == 'None' and self.has_return_exprs(node):
ret_type = 'Optional[Any]'
# Special case for generators.
if (self.is_generator(node) and
not (ret_type == 'Iterator' or ret_type.startswith('Iterator['))):
if ret_type.startswith('Optional['):
assert ret_type[-1] == ']'
ret_type = ret_type[9:-1]
ret_type = 'Iterator[%s]' % ret_type
ret_type = self.update_type_names(ret_type)
return arg_types, ret_type
return None
def update_type_names(self, type_str):
# Replace e.g. `List[pkg.mod.SomeClass]` with
# `List[SomeClass]` and remember to import it.
return re.sub(r'[\w.]+', self.type_updater, type_str)
def type_updater(self, match):
# Replace `pkg.mod.SomeClass` with `SomeClass`
# and remember to import it.
word = match.group()
if word == '...':
return word
if '.' not in word:
# Assume it's either builtin or from `typing`
if word in typing_all:
self.add_import('typing', word)
return word
mod, name = word.rsplit('.', 1)
self.add_import(mod, name)
return name
```
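The path helpers above are importable on their own. A minimal sketch (assuming `pyannotate` is installed) of how `crawl_up` and `strip_py` map a file path back to a dotted module name; the temporary package layout is purely illustrative:

```python
import os
import tempfile
from pyannotate_tools.fixes.fix_annotate_json import crawl_up, strip_py

tmp = tempfile.mkdtemp()
pkg = os.path.join(tmp, 'pkg')
os.makedirs(pkg)
open(os.path.join(pkg, '__init__.py'), 'w').close()
open(os.path.join(pkg, 'mod.py'), 'w').close()

print(strip_py('mod.py'))                     # 'mod'
print(crawl_up(os.path.join(pkg, 'mod.py')))  # (tmp, 'pkg.mod')
```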
{
"source": "jmikedupont2/pythoscope",
"score": 3
}
#### File: pythoscope/bytecode_tracer/code_rewriting_importer.py
```python
import imp
from . import imputil
import marshal
import os
import struct
import sys
from types import CodeType
# byte-compiled file suffix character
_suffix_char = __debug__ and 'c' or 'o'
# byte-compiled file suffix
_suffix = '.py' + _suffix_char
# the C_EXTENSION suffixes
_c_suffixes = filter(lambda x: x[2] == imp.C_EXTENSION, imp.get_suffixes())
def _timestamp(pathname):
"Return the file modification time as a Long."
try:
s = os.stat(pathname)
except OSError:
return None
return long(s[8])
def _compile(path):
"Read and compile Python source code from file."
f = open(path)
c = f.read()
f.close()
return compile(c, path, 'exec')
def _fs_import(dir, modname, fqname):
"Fetch a module from the filesystem."
pathname = os.path.join(dir, modname)
if os.path.isdir(pathname):
values = { '__pkgdir__' : pathname, '__path__' : [ pathname ] }
ispkg = 1
pathname = os.path.join(pathname, '__init__')
else:
values = { }
ispkg = 0
# look for dynload modules
for desc in _c_suffixes:
file = pathname + desc[0]
try:
fp = open(file, desc[1])
except IOError:
pass
else:
module = imp.load_module(fqname, fp, file, desc)
values['__file__'] = file
return 0, module, values
t_py = _timestamp(pathname + '.py')
t_pyc = _timestamp(pathname + _suffix)
if t_py is None and t_pyc is None:
return None
code = None
if t_py is None or (t_pyc is not None and t_pyc >= t_py):
file = pathname + _suffix
f = open(file, 'rb')
if f.read(4) == imp.get_magic():
t = struct.unpack('<I', f.read(4))[0]
if t == t_py:
code = marshal.load(f)
f.close()
if code is None:
file = pathname + '.py'
code = _compile(file)
values['__file__'] = file
return ispkg, code, values
class PathImporter(imputil.Importer):
def __init__(self, path, callback):
self.path = path
self.callback = callback
def rewrite(self, retvals):
if isinstance(retvals, tuple) and type(retvals[1]) == CodeType:
return (retvals[0], self.callback(retvals[1]), retvals[2])
return retvals
def get_code(self, parent, modname, fqname):
if parent:
# we are looking for a module inside of a specific package
return self.rewrite(_fs_import(parent.__pkgdir__, modname, fqname))
# scan sys.path, looking for the requested module
for dir in self.path:
if isinstance(dir, str):
result = _fs_import(dir, modname, fqname)
if result:
return self.rewrite(result)
# not found
return None
class ImportManager(imputil.ImportManager):
def _import_hook(self, fqname, globals=None, locals=None, fromlist=None, level=-1):
# TODO: support level argument added in Python 2.5
return imputil.ImportManager._import_hook(self, fqname, globals, locals, fromlist)
import_manager = ImportManager()
def install(callback):
"Install callback as a code-rewriting function for each imported module."
import_manager.install()
sys.path.insert(0, PathImporter(sys.path, callback))
sys.path.insert(0, imputil.BuiltinImporter())
def uninstall():
import_manager.uninstall()
```
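A hypothetical usage sketch (Python 2, matching the module above, and assuming pythoscope's `bytecode_tracer` package is importable): after `install`, every module subsequently loaded from source has its code object passed through the callback before execution.

```python
from bytecode_tracer import code_rewriting_importer

def trace_rewriter(code):
    # Inspect or transform the code object here; returning it unchanged
    # makes the importer a no-op.
    print "importing %s" % code.co_filename
    return code

code_rewriting_importer.install(trace_rewriter)
import json  # the callback fires for modules loaded from .py files
code_rewriting_importer.uninstall()
```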
#### File: pythoscope/generator/objects_namer.py
```python
from pythoscope.generator.dependencies import sorted_by_timestamp
from pythoscope.generator.lines import Assign
from pythoscope.serializer import SerializedObject
from pythoscope.util import all_of_type, key_for_value, underscore
# :: SerializedObject -> str
def get_name_base_for_object(obj):
common_names = {'list': 'alist',
'dict': 'adict',
'array.array': 'array',
'datetime': 'dt', # we can't name it 'datetime', because that is module's name
'types.FunctionType': 'function',
'types.GeneratorType': 'generator'}
return common_names.get(obj.type_name, underscore(obj.type_name))
# :: [str], str -> str
def get_next_name(names, base):
"""Figure out a new name starting with base that doesn't appear in given
list of names.
>>> get_next_name(["alist", "adict1", "adict2"], "adict")
'adict3'
"""
base_length = len(base)
def has_right_base(name):
return name.startswith(base)
def get_index(name):
return int(name[base_length:])
    return base + str(max(map(get_index, filter(has_right_base, names))) + 1)
# :: SerializedObject, {SerializedObject: str}, bool -> None
def assign_name_to_object(obj, assigned_names, rename=True):
"""Assign a right name for given object.
May reassign an existing name for an object as a side effect, unless
`rename` is False.
"""
if obj in assigned_names:
return
base = get_name_base_for_object(obj)
other_obj = key_for_value(assigned_names, base)
if other_obj:
# Avoid overlapping names by numbering objects with the same base.
if rename:
assigned_names[other_obj] = base+"1"
assigned_names[obj] = base+"2"
elif base+"1" in list(assigned_names.values()):
# We have some objects already numbered, insert a name with a new index.
assigned_names[obj] = get_next_name(list(assigned_names.values()), base)
else:
# It's the first object with that base.
assigned_names[obj] = base
# :: [SerializedObject], {SerializedObject: str}, bool -> None
def assign_names_to_objects(objects, names, rename=True):
"""Modifies names dictionary as a side effect.
"""
for obj in sorted_by_timestamp(objects):
assign_name_to_object(obj, names, rename)
# :: [Event] -> [SerializedObject]
def objects_only(events):
return all_of_type(events, SerializedObject)
# :: [Event] -> [Event]
def name_objects_on_timeline(events):
names = {}
assign_names_to_objects(objects_only(events), names)
def map_object_to_assign(event):
if isinstance(event, SerializedObject):
return Assign(names[event], event, event.timestamp)
return event
return list(map(map_object_to_assign, events))
```
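A small sketch of the numbering scheme (assuming pythoscope is importable); plain stand-in objects are used here, which works because `assign_name_to_object` only needs a hashable object with a `type_name` attribute:

```python
from pythoscope.generator.objects_namer import assign_name_to_object

class FakeObject(object):
    def __init__(self, type_name):
        self.type_name = type_name

names = {}
first, second, third = FakeObject('dict'), FakeObject('dict'), FakeObject('dict')
assign_name_to_object(first, names)   # first object keeps the bare base: 'adict'
assign_name_to_object(second, names)  # collision renames both: 'adict1', 'adict2'
assign_name_to_object(third, names)   # numbering continues: 'adict3'
print(sorted(names.values()))         # ['adict1', 'adict2', 'adict3']
```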
#### File: pythoscope/inspector/static.py
```python
import re
import types
import sys
from pythoscope.astvisitor import descend, ASTVisitor
from pythoscope.astbuilder import parse, ParseError
from pythoscope.logger import log
from pythoscope.store import Class, Function, Method, TestClass, TestMethod
from pythoscope.util import all_of_type, is_generator_code, \
read_file_contents, compile_without_warnings
def is_test_class(name, bases):
"""Look at the name and bases of a class to determine whether it's a test
class or not.
>>> is_test_class("TestSomething", [])
True
>>> is_test_class("SomethingElse", [])
False
>>> is_test_class("ItDoesntLookLikeOne", ["unittest.TestCase"])
True
"""
return name.startswith("Test") or name.endswith("Test") \
or "unittest.TestCase" in bases
def unindent(string):
"""Remove the initial part of whitespace from string.
>>> unindent("1 + 2 + 3\\n")
'1 + 2 + 3'
>>> unindent(" def fun():\\n return 42\\n")
'def fun():\\n return 42'
>>> unindent("\\n def fun():\\n return 42\\n")
'def fun():\\n return 42'
>>> unindent(" def fun():\\n return 42\\n\\n")
'def fun():\\n return 42'
"""
string = re.sub(r'^\n*', '', string.rstrip()) # ignore leading and trailing newlines
match = re.match(r'^([\t ]+)', string)
if not match:
return string
whitespace = match.group(1)
lines = []
for line in string.splitlines(True):
if line.startswith(whitespace):
lines.append(line[len(whitespace):])
else:
return string
return ''.join(lines)
def function_code_from_definition(definition):
"""Return a code object of a given function definition.
Can raise SyntaxError if the definition is not valid.
"""
consts = compile_without_warnings(unindent(str(definition))).co_consts
return all_of_type(consts, types.CodeType)[0]
def is_generator_definition(definition):
"""Return True if given piece of code is a generator definition.
>>> is_generator_definition("def f():\\n return 1\\n")
False
>>> is_generator_definition("def g():\\n yield 2\\n")
True
>>> is_generator_definition(" def indented_gen():\\n yield 3\\n")
True
>>> is_generator_definition("\\n def indented_gen():\\n yield 3\\n")
True
"""
try:
return is_generator_code(function_code_from_definition(definition))
except SyntaxError:
# This most likely means given code used "return" with argument
# inside generator.
return False
def create_definition(name, args, code, definition_type):
return definition_type(name, args=args, code=code,
is_generator=is_generator_definition(code))
class ModuleVisitor(ASTVisitor):
def __init__(self):
ASTVisitor.__init__(self)
self.imports = []
self.objects = []
self.main_snippet = None
self.last_import = None
self.past_imports = False
def visit_class(self, name, bases, body):
visitor = descend(body.children, ClassVisitor)
if is_test_class(name, bases):
methods = [TestMethod(n, c) for (n, a, c) in visitor.methods]
klass = TestClass(name=name, test_cases=methods, code=body)
else:
methods = [create_definition(n, a, c, Method) for (n, a, c) in visitor.methods]
klass = Class(name=name, methods=methods, bases=bases)
self.objects.append(klass)
self.past_imports = True
def visit_function(self, name, args, body):
self.objects.append(create_definition(name, args, body, Function))
self.past_imports = True
def visit_lambda_assign(self, name, args):
self.objects.append(Function(name, args=args))
self.past_imports = True
def visit_import(self, names, import_from, body):
if import_from:
for name in names:
self.imports.append((import_from, name))
else:
self.imports.extend(names)
if not self.past_imports:
self.last_import = body
def visit_main_snippet(self, body):
self.main_snippet = body
self.past_imports = True
class ClassVisitor(ASTVisitor):
def __init__(self):
ASTVisitor.__init__(self)
self.methods = []
def visit_class(self, name, bases, body):
# Ignore definitions of subclasses.
pass
def visit_function(self, name, args, body):
self.methods.append((name, args, body))
def inspect_module(project, path):
return inspect_code(project, path, read_file_contents(path))
# :: (Project, string, string) -> Module
def inspect_code(project, path, code):
try:
tree = parse(code)
except ParseError as e:
        log.warning("Inspection of module %s failed with error %s" % (path, e))
        return project.create_module(path, errors=[e])  # record the error and skip the module
visitor = descend(tree, ModuleVisitor)
    # We assume that all test classes in this module have dependencies on
    # all imports the module contains.
for test_class in [o for o in visitor.objects if isinstance(o, TestClass)]:
# We gathered all imports in a single list, but import lists of those
# classes may diverge in time, so we don't want to share their
# structure.
test_class.imports = visitor.imports[:]
return project.create_module(path, code=tree, objects=visitor.objects,
imports=visitor.imports, main_snippet=visitor.main_snippet,
last_import=visitor.last_import)
```
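`inspect_code` is exercised directly in `test_static_inspector.py` further down; a condensed sketch of the same pattern (the `EmptyProject` helper lives in pythoscope's test suite, so its import path here is an assumption):

```python
from pythoscope.inspector.static import inspect_code
from test.helper import EmptyProject  # test-suite helper; exact path may differ

module = inspect_code(EmptyProject(), "module.py",
                      "class AClass(object):\n"
                      "    def amethod(self):\n"
                      "        pass\n")
print([c.name for c in module.classes])  # ['AClass']
```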
#### File: pythoscope/pythoscope/util.py
```python
import dill as pickle  # dill can serialize objects the standard pickle cannot
import gc
import itertools
import operator
import os
import re
import sys
import traceback
import types
import warnings
from pythoscope.py_wrapper_object import get_wrapper_self
from functools import reduce
def compact(lst):
"Remove all occurences of None from the given list."
return [x for x in lst if x is not None]
def counted(objects):
"""Count how many times each object appears in a list and return
list of (object, count) tuples.
>>> counted(['a', 'b', 'c', 'a', 'b', 'a'])
[('a', 3), ('b', 2), ('c', 1)]
>>> counted([])
[]
"""
return [(obj, len(list(group))) for obj, group in itertools.groupby(sorted(objects))]
def camelize(name):
"""Covert name into CamelCase.
>>> camelize('underscore_name')
'UnderscoreName'
>>> camelize('AlreadyCamelCase')
'AlreadyCamelCase'
>>> camelize('')
''
"""
def upcase(match):
return match.group(1).upper()
return re.sub(r'(?:^|_)(.)', upcase, name)
def underscore(name):
"""Convert name into underscore_name.
>>> underscore('CamelCase')
'camel_case'
>>> underscore('already_underscore_name')
'already_underscore_name'
>>> underscore('BigHTMLClass')
'big_html_class'
>>> underscore('')
''
"""
if name and name[0].isupper():
name = name[0].lower() + name[1:]
def capitalize(match):
string = match.group(1).capitalize()
return string[:-1] + string[-1].upper()
def underscore(match):
return '_' + match.group(1).lower()
name = re.sub(r'([A-Z]+)', capitalize, name)
return re.sub(r'([A-Z])', underscore, name)
def pluralize(word, count):
"""Depending on the counter, return a singular or a plural form of the
given word.
>>> pluralize("word", 1)
'one word'
>>> pluralize("word", 2)
'2 words'
"""
if count == 1:
return "one %s" % word
else:
return "%d %ss" % (count, word)
# :: string -> string
def string2id(string):
"""Remove from string all characters that cannot be used in an identifier.
"""
return re.sub(r'[^a-zA-Z0-9_]', '', re.sub(r'\s+', '_', string.strip()))
# :: string -> string
def string2filename(string):
"""Remove from string all characters that cannot be used in a file name.
>>> string2filename('file.txt')
'file.txt'
>>> string2filename(os.path.join('package', 'module.py'))
'package_module.py'
>>> string2filename(os.path.join('directory with spaces', 'file.with.dots'))
'directory with spaces_file.with.dots'
"""
return re.sub(re.escape(os.path.sep), '_', string)
def file_mode(base, binary):
if binary:
return base + 'b'
return base
def read_file_contents(filename, binary=False):
    # Try UTF-8 first, then fall back to the platform default encoding.
    # (Binary mode requires encoding=None, so the fallback covers it too.)
    contents = None
    for encoding in ('utf-8', None):
        try:
            with open(filename, file_mode('r', binary), encoding=encoding) as fd:
                contents = fd.read()
        except (IOError, ValueError, UnicodeDecodeError):
            continue
        else:
            break
    return contents
def write_content_to_file(string, filename, binary=False):
    with open(filename, file_mode('w', binary)) as fd:
        fd.write(string)
def all_of_type(objects, type):
"""Return all objects that are instances of a given type.
"""
return [o for o in objects if isinstance(o, type)]
def max_by_not_zero(func, collection):
"""Return the element of a collection for which func returns the highest
value, greater than 0.
Return None if there is no such value.
>>> max_by_not_zero(len, ["abc", "d", "ef"])
'abc'
>>> max_by_not_zero(lambda x: x, [0, 0, 0, 0]) is None
True
>>> max_by_not_zero(None, []) is None
True
"""
if not collection:
return None
def annotate(element):
return (func(element), element)
    highest = max(map(annotate, collection), key=lambda pair: pair[0])
if highest and highest[0] > 0:
return highest[1]
else:
return None
def get_names(objects):
    return [c.name for c in objects]
def map_values(function, dictionary):
new_dictionary = {}
for key, value in dictionary.items():
new_dictionary[key] = function(value)
return new_dictionary
class DirectoryException(Exception):
pass
def ensure_directory(directory):
"""Make sure given directory exists, creating it if necessary.
"""
if os.path.exists(directory):
if not os.path.isdir(directory):
raise DirectoryException("Destination is not a directory.")
else:
os.makedirs(directory)
def get_last_modification_time(path):
try:
# Casting to int, because we don't need better resolution anyway and it
# eases testing on different OSes.
return int(os.path.getmtime(path))
except OSError:
# File may not exist, in which case it was never modified.
return 0
def starts_with_path(path, prefix):
"""Return True if given path starts with given prefix and False otherwise.
"""
return os.path.realpath(path).startswith(os.path.realpath(prefix))
def extract_subpath(path, prefix):
"""Remove prefix from given path to generate subpath, so the following
correspondence is preserved:
path <=> os.path.join(prefix, subpath)
in terms of physical path (i.e. not necessarily strict string
equality).
"""
prefix = os.path.realpath(prefix)
prefix_length = len(prefix)
if not prefix.endswith(os.path.sep):
prefix_length += 1
return os.path.realpath(path)[prefix_length:]
def directories_under(path):
"""Return names of directories under given path (not recursive).
"""
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
yield entry
def findfirst(pred, seq):
"""Return the first element of given sequence that matches predicate.
"""
for item in seq:
if pred(item):
return item
def flatten(lst):
"""Flatten given list.
>>> flatten([[1, 2, 3], [4, 5], [6, 7], [8]])
[1, 2, 3, 4, 5, 6, 7, 8]
"""
return list(itertools.chain(*lst))
# :: [set] -> set
def union(*sets):
"""Return a union of all the given sets.
"""
if len(sets) == 0:
return set()
# Since 2.6 set.union accepts multiple input iterables.
if sys.version_info >= (2, 6):
return set.union(*sets)
else:
return reduce(operator.or_, sets, set())
# :: dict, object -> object
def key_for_value(dictionary, value):
"""Return the first key of dictionary that maps to given value.
>>> key_for_value({'a': 1, 'b': 2}, 2)
'b'
>>> key_for_value({}, 1)
"""
for k, v in dictionary.items():
if v == value:
return k
def get_generator_from_frame(frame):
generators = all_of_type(gc.get_referrers(frame), types.GeneratorType)
if generators:
return generators[0]
def is_generator_code(code):
return code.co_flags & 0x20 != 0
def generator_has_ended(generator):
"""Return True if the generator has been exhausted and False otherwise.
>>> generator_has_ended(1)
Traceback (most recent call last):
...
TypeError: argument is not a generator
"""
if not isinstance(generator, types.GeneratorType):
raise TypeError("argument is not a generator")
return _generator_has_ended(generator)
try:
from _util import _generator_has_ended
except ImportError:
if sys.version_info < (2, 5):
# In Python 2.4 and earlier we can't reliably tell if a generator
# is active or not without going to the C level. We assume it
# has ended, as it will be true most of the time in our use case.
def _generator_has_ended(generator):
return True
generator_has_ended.unreliable = True
else:
# This is a hack that uses the fact that in Python 2.5 and higher
# generator frame is garbage collected once the generator has ended.
def _generator_has_ended(generator):
return generator.gi_frame is None
wrapper_type = [].__len__.__class__
def is_method_wrapper(obj):
return isinstance(obj, wrapper_type)
# :: object -> object | None
def get_self_from_method(method):
# Since Python 2.5 even wrapper methods have __self__, so let's use that
# when we can. For earlier versions we have to go deeper.
if hasattr(method, '__self__'):
return method.__self__
elif is_method_wrapper(method):
return get_wrapper_self(method)
def compile_without_warnings(stmt):
"""Compile single interactive statement with Python interpreter warnings
disabled.
"""
warnings.simplefilter('ignore')
code = compile(stmt, '', 'single')
warnings.resetwarnings()
return code
def callers_name():
return sys._getframe(2).f_code.co_name
def type_names(types):
if isinstance(types, tuple):
return '/'.join(map(type_names, types))
return types.__name__
def assert_argument_type(obj, expected_type):
if not isinstance(obj, expected_type):
raise TypeError("%s() should be called with a %s argument, not %s" %
(callers_name(), type_names(expected_type), obj))
def quoted_block(text):
return ''.join(["> %s" % line for line in text.splitlines(True)])
def class_of(obj):
if hasattr(obj, "__class__"):
return obj.__class__
return type(obj)
def class_name(obj):
return class_of(obj).__name__
def module_name(obj):
return class_of(obj).__module__
def module_path_to_name(module_path, newsep="_"):
return re.sub(r'(%s__init__)?\.py$' % re.escape(os.path.sep), '', module_path).\
replace(os.path.sep, newsep)
def last_traceback():
return "".join(traceback.format_tb(sys.exc_info()[2]))
def last_exception_as_string():
exc_type, exc_value = sys.exc_info()[:2]
# Special case for string exceptions.
if isinstance(exc_type, str):
return exc_type
else:
return repr(exc_value)
# Regular expressions helpers.
RePatternType = type(re.compile(''))
def regexp_flags_as_string(flags):
"""Return an expression in string form that corresponds to given set of
regexp flags.
"""
strings = []
if flags & re.IGNORECASE:
strings.append('re.IGNORECASE')
if flags & re.LOCALE:
strings.append('re.LOCALE')
if flags & re.MULTILINE:
strings.append('re.MULTILINE')
if flags & re.DOTALL:
strings.append('re.DOTALL')
if flags & re.VERBOSE:
strings.append('re.VERBOSE')
if flags & re.UNICODE:
strings.append('re.UNICODE')
return " | ".join(strings)
def load_pickle_from(path):
with open(path, 'rb') as f:
obj = pickle.load(f)
return obj
```
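Most helpers above carry doctests; the path utilities do not, so here is a brief sketch of their contract (hypothetical paths, assuming `pythoscope.util` is importable). `extract_subpath` is the inverse of `os.path.join(prefix, subpath)` up to `os.path.realpath` normalization:

```python
import os
from pythoscope.util import extract_subpath, starts_with_path

path = os.path.join('/tmp', 'project', 'module.py')
print(starts_with_path(path, '/tmp/project'))  # True
print(extract_subpath(path, '/tmp/project'))   # 'module.py'
```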
#### File: pythoscope/test/test_acceptance.py
```python
from pythoscope.inspector import inspect_project
from pythoscope.generator import add_tests_to_project
from pythoscope.util import read_file_contents, write_content_to_file
from nose import SkipTest
from .assertions import *
from .helper import get_test_module_contents, CapturedLogger, \
ProjectInDirectory, putfile, TempDirectory, read_data
class TestStaticAnalysis(CapturedLogger, TempDirectory):
def test_generates_test_stubs(self):
expected_result = read_data("static_analysis_output.py")
project = ProjectInDirectory(self.tmpdir)
module_path = putfile(project.path, "module.py", read_data("static_analysis_module.py"))
inspect_project(project)
add_tests_to_project(project, [module_path], 'unittest')
result = get_test_module_contents(project)
assert_equal_strings(expected_result, result)
class TestAppendingTestClasses(CapturedLogger, TempDirectory):
def test_appends_test_classes_to_existing_test_modules(self):
self._test_appending("appending_test_cases_module_modified.py",
"appending_test_cases_output_expected.py")
def test_appends_test_methods_to_existing_test_classes(self):
self._test_appending("appending_test_cases_module_added_method.py",
"appending_test_cases_added_method_output_expected.py")
def _test_appending(self, modified_input, expected_output):
project = ProjectInDirectory(self.tmpdir)
module_path = putfile(project.path, "module.py", read_data("appending_test_cases_module_initial.py"))
test_module_path = putfile(project.path, "test_module.py", read_data("appending_test_cases_output_initial.py"))
# Analyze the project with an existing test module.
inspect_project(project)
# Filesystem stat has resolution of 1 second, and we don't want to
# sleep in a test, so we just fake the original files creation time.
project["module"].created = 0
project["test_module"].created = 0
# Modify the application module and analyze it again.
putfile(project.path, "module.py", read_data(modified_input))
inspect_project(project)
# Regenerate the tests.
add_tests_to_project(project, [module_path], 'unittest')
project.save()
assert_length(project.get_modules(), 2)
result = read_file_contents(test_module_path)
expected_result = read_data(expected_output)
assert_equal_strings(expected_result, result)
class TestAcceptanceWithPointOfEntry(CapturedLogger, TempDirectory):
def execute_with_point_of_entry_and_assert(self, id):
expected_result = read_data("%s_output.py" % id)
project = ProjectInDirectory(self.tmpdir).with_points_of_entry(["poe.py"])
module_path = putfile(project.path, "module.py", read_data("%s_module.py" % id))
write_content_to_file(read_data("generic_acceptance_poe.py"), project.path_for_point_of_entry("poe.py"))
inspect_project(project)
add_tests_to_project(project, [module_path], 'unittest')
result = get_test_module_contents(project)
assert_equal_strings(expected_result, result)
class TestObjectsIdentityPreservation(TestAcceptanceWithPointOfEntry):
def test_preserves_identity_of_objects(self):
self.execute_with_point_of_entry_and_assert("objects_identity")
class TestSideEffectsCaptureAndGeneration(TestAcceptanceWithPointOfEntry):
def test_captures_and_generates_tests_for_code_with_side_effects_on_lists(self):
self.execute_with_point_of_entry_and_assert("side_effects_on_lists")
class TestGlobalVariables(TestAcceptanceWithPointOfEntry):
def test_handles_global_variables(self):
self.execute_with_point_of_entry_and_assert("global_variables")
class TestAttributesRebind(TestAcceptanceWithPointOfEntry):
def test_handles_attribute_rebind(self):
self.execute_with_point_of_entry_and_assert("attributes_rebind")
```
#### File: pythoscope/test/test_static_inspector.py
```python
import sys
from nose import SkipTest
from pythoscope.inspector.static import inspect_code
from pythoscope.astbuilder import regenerate
from pythoscope.store import code_of
from pythoscope.util import get_names
from .assertions import *
from .helper import EmptyProject
new_style_class = """
class AClass(object):
def amethod(self):
pass
"""
old_style_class = """
class OldStyleClass:
def amethod(self):
pass
"""
class_without_methods = """
class ClassWithoutMethods(object):
pass
"""
stand_alone_function = """
def a_function():
pass
"""
inner_classes_and_function = """
def outer_function():
def inner_function():
pass
class InnerClass(object):
pass
class OuterClass(object):
class AnotherInnerClass(object):
pass
"""
class_with_methods = """
class ClassWithThreeMethods(object):
def first_method(self):
pass
def second_method(self, x):
pass
def third_method(self, x, y):
pass
"""
syntax_error = """
a b c d e f g
"""
indentation_error = """
def answer():
42
"""
definitions_inside_try_except = """
try:
def inside_function(): pass
class InsideClass(object): pass
except:
pass
"""
definitions_inside_if = """
if True:
def inside_function(): pass
class InsideClass(object): pass
"""
definitions_inside_while = """
while True:
def inside_function(): pass
class InsideClass(object): pass
"""
definitions_inside_for = """
for x in range(1):
def inside_function(): pass
class InsideClass(object): pass
"""
definitions_inside_with = """
from __future__ import with_statement
with x:
def inside_function(): pass
class InsideClass(object): pass
"""
lambda_definition = """
lambda_function = lambda x: not x
"""
class_without_parents = """
class ClassWithoutParents:
pass
"""
class_with_one_parent = """
class ClassWithOneParent(object):
pass
"""
class_with_two_parents = """
class ClassWithTwoParents(Mother, Father):
pass
"""
class_inheriting_from_some_other_module_class = """
class SomeClass(othermodule.Class):
pass
"""
class_with_inner_class = """
class OuterClass(object):
def __init__(self):
pass
def outer_class_method(self):
pass
class InnerClass(object):
def __init__(self):
pass
def inner_class_method(self):
pass
"""
two_test_classes = """import unittest
class FirstTestClass(unittest.TestCase):
def test_this(self):
pass
def test_that(self):
pass
class TestMore:
def test_more(self):
pass
"""
strange_test_code = "# Tests will be here someday"
nose_style_test_functions = """import nose
def test_this():
pass
def test_that():
pass
"""
application_module_with_test_class = """import os
import unittest
def fib(x):
if x in [0,1]:
return x
else:
return fib(x-2) + fib(x-1)
class TestFib(unittest.TestCase):
def test_one(self):
assert fib(1) == 1
def test_two(self):
assert fib(2) == 1
def test_three(self):
assert fib(3) == 2
if __name__ == '__main__':
unittest.main()
"""
standard_generator_definition = """def gen(x):
yield x
yield x + 1
"""
function_returning_generator_object = """def fun():
def gen():
yield 1
return gen()
"""
class_with_method_generator_definition = """class SomeClass(object):
def method_generator(self):
yield 2
"""
function_with_default_argument_value = """def nofun(to='day'):
return 'home'
"""
function_with_one_argument = """def fun(arg):
pass
"""
function_with_many_arguments = """def fun3(arg1, arg2, arg3):
pass
"""
function_with_many_arguments_and_default_values = """def optfun(arg, opt1=123, opt2='abc'):
pass
"""
function_with_positional_and_keyword_arguments = """def morefun(arg, *args, **kwds):
pass
"""
functions_with_nested_arguments = """def nestfun((a, b), c):
pass
def nestfun2(a, (b, c)):
pass
def nestfun3(a, (b, c), d):
pass
"""
class TestStaticInspector:
def _inspect_code(self, code):
return inspect_code(EmptyProject(), "module.py", code)
def test_inspects_top_level_classes(self):
module = self._inspect_code(new_style_class)
assert_single_class(module, "AClass")
def test_inspects_top_level_functions(self):
module = self._inspect_code(stand_alone_function)
assert_single_function(module, "a_function")
def test_doesnt_count_methods_as_functions(self):
module = self._inspect_code(new_style_class)
assert_length(module.functions, 0)
def test_inspects_old_style_classes(self):
module = self._inspect_code(old_style_class)
assert_single_class(module, "OldStyleClass")
def test_inspects_classes_without_methods(self):
module = self._inspect_code(class_without_methods)
assert_single_class(module, "ClassWithoutMethods")
def test_ignores_inner_classes_and_functions(self):
module = self._inspect_code(inner_classes_and_function)
assert_single_class(module, "OuterClass")
assert_single_function(module, "outer_function")
def test_inspects_methods_of_a_class(self):
module = self._inspect_code(class_with_methods)
assert_equal(["first_method", "second_method", "third_method"],
get_names(module.classes[0].methods))
def test_collector_handles_syntax_errors(self):
module = self._inspect_code(syntax_error)
assert_length(module.errors, 1)
def test_collector_handles_indentation_errors(self):
module = self._inspect_code(indentation_error)
assert_length(module.errors, 1)
def test_inspects_functions_and_classes_inside_other_blocks(self):
suite = [definitions_inside_try_except, definitions_inside_if,
definitions_inside_while, definitions_inside_for]
for case in suite:
module = self._inspect_code(case)
assert_single_class(module, "InsideClass")
assert_single_function(module, "inside_function")
def test_inspects_functions_and_classes_inside_with(self):
# With statement was introduced in Python 2.5, so skip this test for
# earlier versions.
if sys.version_info < (2, 5):
raise SkipTest
module = self._inspect_code(definitions_inside_with)
assert_single_class(module, "InsideClass")
assert_single_function(module, "inside_function")
def test_inspects_functions_defined_using_lambda(self):
module = self._inspect_code(lambda_definition)
assert_single_function(module, "lambda_function")
def test_inspects_class_bases(self):
suite = [class_without_parents, class_with_one_parent, class_with_two_parents]
expected_results = [[], ["object"], ["Mother", "Father"]]
for case, expected in zip(suite, expected_results):
module = self._inspect_code(case)
assert_equal(expected, module.classes[0].bases)
def test_correctly_inspects_bases_from_other_modules(self):
module = self._inspect_code(class_inheriting_from_some_other_module_class)
assert_length(module.objects, 1)
assert_equal(["othermodule.Class"], module.objects[0].bases)
def test_correctly_inspects_calculated_bases(self):
class_with_namedtuple = "import collections\n\n" +\
"class ClassWithNamedtuple(collections.namedtuple('Point', 'x y')):\n" +\
" pass\n"
module = self._inspect_code(class_with_namedtuple)
assert_single_class(module, "ClassWithNamedtuple")
assert_equal(["collections.namedtuple('Point', 'x y')"], module.objects[0].bases)
def test_ignores_existance_of_any_inner_class_methods(self):
module = self._inspect_code(class_with_inner_class)
assert_single_class(module, "OuterClass")
assert_equal(["__init__", "outer_class_method"],
get_names(module.classes[0].methods))
def test_inspects_test_modules(self):
module = self._inspect_code(two_test_classes)
assert_equal(["unittest"], module.imports)
assert_equal(["FirstTestClass", "TestMore"],
get_names(module.test_classes))
assert_equal(["test_this", "test_that"],
get_names(module.test_classes[0].test_cases))
assert_equal(["test_more"],
get_names(module.test_classes[1].test_cases))
def test_recognizes_unrecognized_chunks_of_test_code(self):
module = self._inspect_code(strange_test_code)
assert_equal(strange_test_code, module.get_content())
def test_recognizes_nose_style_test_code(self):
module = self._inspect_code(nose_style_test_functions)
assert_equal(["nose"], module.imports)
assert_equal(nose_style_test_functions, module.get_content())
assert_equal(None, code_of(module, 'main_snippet'))
def test_inspects_test_classes_inside_application_modules(self):
module = self._inspect_code(application_module_with_test_class)
assert_equal_sets(["os", "unittest"], module.imports)
assert_equal(application_module_with_test_class, module.get_content())
assert code_of(module, 'main_snippet') is not None
assert_equal(["TestFib"], get_names(module.test_classes))
assert_equal(["fib"], get_names(module.functions))
def test_recognizes_generator_definitions(self):
module = self._inspect_code(standard_generator_definition)
assert_single_function(module, "gen")
assert module.functions[0].is_generator
def test_treats_functions_returning_generator_objects_as_functions(self):
module = self._inspect_code(function_returning_generator_object)
assert_single_function(module, "fun")
assert not module.functions[0].is_generator
def test_recognizes_generator_methods(self):
module = self._inspect_code(class_with_method_generator_definition)
method = module.classes[0].methods[0]
assert method.is_generator
assert_equal("method_generator", method.name)
def test_handles_functions_without_arguments(self):
module = self._inspect_code(stand_alone_function)
assert_single_function(module, "a_function", args=[])
def test_handles_functions_with_one_argument(self):
module = self._inspect_code(function_with_one_argument)
assert_single_function(module, "fun", args=['arg'])
def test_handles_functions_with_many_arguments(self):
module = self._inspect_code(function_with_many_arguments)
assert_single_function(module, "fun3", args=['arg1', 'arg2', 'arg3'])
def test_handles_functions_with_default_argument_values(self):
module = self._inspect_code(function_with_default_argument_value)
assert_single_function(module, "nofun", args=['to'])
def test_handles_functions_with_many_arguments_and_default_values(self):
module = self._inspect_code(function_with_many_arguments_and_default_values)
assert_single_function(module, "optfun", args=['arg', 'opt1', 'opt2'])
def test_handles_functions_with_positional_and_keyword_arguments(self):
module = self._inspect_code(function_with_positional_and_keyword_arguments)
assert_single_function(module, "morefun", args=['arg', '*args', '**kwds'])
def test_handles_arguments_of_lambda_definitions(self):
module = self._inspect_code(lambda_definition)
assert_single_function(module, "lambda_function", args=['x'])
def test_handles_functions_with_nested_arguments(self):
info = self._inspect_code(functions_with_nested_arguments)
assert_length(info.functions, 3)
assert_function(info.functions[0], "nestfun", [('a', 'b'), 'c'])
assert_function(info.functions[1], "nestfun2", ['a', ('b', 'c')])
assert_function(info.functions[2], "nestfun3", ['a', ('b', 'c'), 'd'])
```
{
"source": "Jmikelittle/ckanext-canada",
"score": 4
}
#### File: ckanext/canada/urlsafe.py
```python
import re
def url_part_escape(orig):
"""
simple encoding for url-parts where all non-alphanumerics are
wrapped in e.g. _xxyyzz_ blocks w/hex UTF-8 xx, yy, zz values
used for safely including arbitrary unicode as part of a url path
all returned characters will be in [a-zA-Z0-9_-]
"""
return '_'.join(
s.encode('hex') if i % 2 else s
for i, s in enumerate(
re.split('([^-a-zA-Z0-9]+)', orig.encode('utf-8'))))
def url_part_unescape(urlpart):
"""
reverse url_part_escape
"""
return u''.join(
s.decode('hex').decode('utf-8') if i % 2 else s.decode('utf-8')
for i, s in enumerate(urlpart.split('_')))
```
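The `str.encode('hex')` codec above is Python 2 only. A round-trip sketch of the same scheme written for Python 3 with `binascii` (the function name `url_part_escape_py3` is ours, not the extension's):

```python
import re
import binascii

def url_part_escape_py3(orig):
    # Odd-indexed chunks from re.split are the non-alphanumeric runs,
    # which get hex-encoded; even-indexed chunks pass through unchanged.
    return '_'.join(
        binascii.hexlify(s.encode('utf-8')).decode('ascii') if i % 2 else s
        for i, s in enumerate(re.split('([^-a-zA-Z0-9]+)', orig)))

print(url_part_escape_py3(u'dataset name/2020'))  # dataset_20_name_2f_2020
```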
{
"source": "jmikeowen/Spheral",
"score": 2
}
#### File: packages/m-aneos/package.py
```python
from spack import *
class MAneos(MakefilePackage):
"""M-Aneos"""
homepage = "https://github.com/isale-code/M-ANEOS"
url = "https://github.com/isale-code/M-ANEOS/releases/download/v1.0beta/M-ANEOS-v1.0.tar.gz"
version('1.0', sha256='3101b113fa59a8b615ec7e9e25479ab9c10d3e544173df0307bb675872773d31')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
build_directory = 'src'
#patch('remove-mpiposix.patch', when='@4.8:4.10.2')
def edit(self, spec, prefix):
makefile = FileFilter('src/Makefile')
makefile.filter(r'^\s*FC\s*=.*', 'FC = ' + spack_fc)
makefile.filter(r'^\s*FCFLAGS\s*=.*', 'FCFLAGS = ' + '-O3 -fPIC')
def install(self, spec, prefix):
mkdir(prefix.lib)
mkdir(prefix.input)
install('src/libaneos.a', prefix.lib)
install('input/dunite_.input', prefix.input)
install('input/quartz_.input', prefix.input)
install('input/serpent.input', prefix.input)
```
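What `edit()` does to `src/Makefile`, sketched with plain `re.sub` (Spack's `FileFilter` applies this kind of substitution line by line; `gfortran` stands in for whatever `spack_fc` resolves to):

```python
import re

makefile = "FC = ifort\nFCFLAGS = -O2\n"
makefile = re.sub(r'^\s*FC\s*=.*', 'FC = gfortran', makefile, flags=re.M)
makefile = re.sub(r'^\s*FCFLAGS\s*=.*', 'FCFLAGS = -O3 -fPIC', makefile, flags=re.M)
print(makefile)  # FC = gfortran / FCFLAGS = -O3 -fPIC
```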
#### File: packages/py-polyclipper/package.py
```python
from spack import *
class PyPolyclipper(CMakePackage, PythonPackage):
"""Polyclipper"""
homepage = "https://pypi.org/project/PYB11Generator/"
url = "https://github.com/LLNL/PolyClipper/archive/refs/tags/v1.2.3.zip"
git = "https://github.com/LLNL/PolyClipper"
maintainers = ['mdavis36','jmikeowen']
version('1.2.3', sha256='366e547bc343033c760727b6cdbf34a304c27bc769a208e9bfaeec42c92dba96')
variant('mpi', default=False, description='Enable MPI Support.')
variant('openmp', default=True, description='Enable OpenMP Support.')
variant('docs', default=False, description='Enable building Docs.')
depends_on('mpi', when='+mpi')
depends_on('blt')
depends_on('py-pybind11')
depends_on('py-pyb11generator')
depends_on('py-decorator')
def cmake_args(self):
spec = self.spec
args = []
args.append(self.define('POLYCLIPPER_BLT_DIR', spec['blt'].prefix))
args.append(self.define('ENABLE_CXXONLY', True))
args.append(self.define('PYTHON_EXE', spec['python'].prefix+'/bin/python'))
args.append(self.define('PYBIND11_INCLUDE_PATH', spec['py-pybind11'].prefix+'/include'))
args.append(self.define('PYB11GEN_PATH', spec['py-pyb11generator'].prefix+'/lib/python2.7/site-packages'))
args.append(self.define('ENABLE_MPI', '+mpi' in spec))
if "+mpi" in spec:
args.append(self.define('MPI_C_COMPILER', spec['mpi'].mpicc) )
args.append(self.define('MPI_CXX_COMPILER', spec['mpi'].mpicxx) )
args.append(self.define('ENABLE_OPENMP', '+openmp' in spec))
args.append(self.define('ENABLE_DOCS', '+docs' in spec))
return args
```
#### File: src/Distributed/fakempi.py
```python
print "Invoking fake mpi module."
rank = 0
procs = 1
MIN = -1
MAX = -2
SUM = -3
def is_fake_mpi():
return True
def reduce(var, op):
return var
def allreduce(var, op):
return var
def gather(obj, root=0):
return [obj,]
def allgather(obj, op):
return [obj,]
def bcast(obj, root=0):
return obj
def barrier():
return
```
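A sketch of the drop-in pattern (Python 2, matching the module; hypothetical serial session). The module is imported under the name `mpi`, so calling code needs no changes:

```python
import fakempi as mpi

# With one fake process every reduction is the identity.
assert mpi.rank == 0 and mpi.procs == 1
assert mpi.allreduce(42, mpi.SUM) == 42
assert mpi.gather("payload") == ["payload"]
```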
#### File: src/helpers/prepend_dir.py
```python
import string
import sys
def prepend(line, directory):
newline = ''
for word in string.split(line, ' '):
if word:
newline = newline + directory + word + ' '
print newline
if __name__ == '__main__':
exec sys.argv[-1]
```
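The script `exec`s its final command-line argument, so a build system would invoke it roughly like this (hypothetical object list and directory; assumes a `python2` interpreter on the PATH, since the script uses Python 2 syntax):

```python
import subprocess
# Prints: build/a.o build/b.o
subprocess.call(['python2', 'prepend_dir.py', "prepend('a.o b.o', 'build/')"])
```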
#### File: src/Material/MaterialUnits.py
```python
from SpheralCompiledPackages import PhysicalConstants
#-------------------------------------------------------------------------------
# MKS units.
#-------------------------------------------------------------------------------
class MKS(PhysicalConstants):
def __init__(self):
PhysicalConstants.__init__(self,
1.0, # Unit length (m)
1.0, # Unit mass (kg)
1.0, # Unit time (sec)
1.0, # Unit temp (kelvin)
1.0) # Unit charge (coulomb)
return
#-------------------------------------------------------------------------------
# CGS units.
#-------------------------------------------------------------------------------
class CGS(PhysicalConstants):
def __init__(self):
PhysicalConstants.__init__(self,
0.01, # Unit length (m)
0.001, # Unit mass (kg)
1.0, # Unit time (sec)
1.0, # Unit temp (kelvin)
1.0) # Unit charge (coulomb)
return
#-------------------------------------------------------------------------------
# Cosmological units (Mpc, Mmsun, Myr)
#-------------------------------------------------------------------------------
class Cosmological(PhysicalConstants):
def __init__(self):
PhysicalConstants.__init__(self,
3.08567757e22, # Unit length (m)
1.9891e36, # Unit mass (kg)
3.155674e19, # Unit time (sec)
1.0, # Unit temp (kelvin)
1.0) # Unit charge (coulomb)
return
#-------------------------------------------------------------------------------
# Solar units. (AU, Msun, yr)
#-------------------------------------------------------------------------------
class Solar(PhysicalConstants):
def __init__(self):
PhysicalConstants.__init__(self,
149597870700.0, # Unit length (m)
1.98892e30, # Unit mass (kg)
365.25*3600*24, # Unit time (sec)
1.0, # Unit temp (kelvin)
1.0) # Unit charge (coulomb)
return
```
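Defining another unit system follows the same pattern; a hypothetical kilometre-gram-hour system, assuming a Spheral session where `SpheralCompiledPackages` is importable:

```python
from SpheralCompiledPackages import PhysicalConstants

class KGH(PhysicalConstants):
    def __init__(self):
        PhysicalConstants.__init__(self,
                                   1.0e3,   # Unit length (m)
                                   1.0e-3,  # Unit mass (kg)
                                   3600.0,  # Unit time (sec)
                                   1.0,     # Unit temp (kelvin)
                                   1.0)     # Unit charge (coulomb)
```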
#### File: src/NodeGenerators/centroidalRelaxNodes.py
```python
from math import *
import mpi
import Spheral
from siloPointmeshDump import *
try:
from SpheralVoronoiSiloDump import SpheralVoronoiSiloDump
except:
print "centroidalRelaxNoddes unable to import SpheralVoronoiSiloDump -- no tessellation output supported."
SpheralVoronoiSiloDump = None
#-------------------------------------------------------------------------------
# Centroidally (in mass) relax points allowing a linear density gradient.
#-------------------------------------------------------------------------------
def centroidalRelaxNodes(nodeListsAndBounds,
W,
rho,
gradrho = None,
boundaries = [],
maxIterations = 100,
maxFracTol = 1.0e-2,
avgFracTol = 1.0e-3,
correctionOrder = Spheral.LinearOrder,
centroidFrac = 0.25,
tessellationBaseDir = ".",
tessellationFileName = None):
# Decide on our dimensionality and import the appropriate aliases.
assert (isinstance(W, Spheral.TableKernel1d) or
isinstance(W, Spheral.TableKernel2d) or
isinstance(W, Spheral.TableKernel3d))
if isinstance(W, Spheral.TableKernel1d):
import Spheral1d as sph
FacetedVolume = sph.Box1d
ndim = 1
elif isinstance(W, Spheral.TableKernel2d):
import Spheral2d as sph
FacetedVolume = sph.Polygon
ndim = 2
else:
import Spheral3d as sph
FacetedVolume = sph.Polyhedron
ndim = 3
# Did we get passed a function or a constant for the density?
if type(rho) is float:
rhoConst = True
class rhofunctor(sph.VectorScalarFunctor):
def __init__(self):
sph.VectorScalarFunctor.__init__(self)
def __call__(self, posi):
return rho
else:
rhoConst = False
class rhofunctor(sph.VectorScalarFunctor):
def __init__(self):
sph.VectorScalarFunctor.__init__(self)
def __call__(self, posi):
return rho(posi)
rhofunc = rhofunctor()
# What about the gradrho? Did we get passed anything?
if gradrho is None:
useGradRhoFunc = False
class gradrhofunctor(sph.VectorVectorFunctor):
def __init__(self):
sph.VectorVectorFunctor.__init__(self)
def __call__(self, posi):
assert "Hey gradrhofunc unimplemented!"
return 0.0
else:
useGradRhoFunc = True
if type(gradrho) is float:
class gradrhofunctor(sph.VectorVectorFunctor):
def __init__(self):
sph.VectorVectorFunctor.__init__(self)
def __call__(self, posi):
return gradrho
else:
class gradrhofunctor(sph.VectorVectorFunctor):
def __init__(self):
sph.VectorVectorFunctor.__init__(self)
def __call__(self, posi):
return gradrho(posi)
gradrhofunc = gradrhofunctor()
# Split out the NodeLists and bounding volumes (if available), depending on what was passed.
bounds = sph.vector_of_FacetedVolume()
holes = sph.vector_of_vector_of_FacetedVolume()
for x in nodeListsAndBounds:
if type(nodeListsAndBounds[0]) is tuple:
bounds = sph.vector_of_FacetedVolume([FacetedVolume()]*len(nodeListsAndBounds))
holes = sph.vector_of_vector_of_FacetedVolume([sph.vector_of_FacetedVolume()]*len(nodeListsAndBounds))
if len(bounds) > 0:
nodeLists = []
for i, xtup in enumerate(nodeListsAndBounds):
if type(xtup) is tuple:
nodeLists.append(xtup[0])
assert len(xtup) in (2,3)
bounds[i] = xtup[1]
if len(xtup) == 3: # Check for holes
assert type(xtup[2]) is list
for x in xtup[2]:
holes[i].append(x)
else:
nodeLists.append(xtup)
else:
nodeLists = nodeListsAndBounds
# Build a local DataBase.
db = sph.DataBase()
for nodes in nodeLists:
db.appendNodeList(nodes)
# We need the boundaries as a vector
bound_vec = sph.vector_of_Boundary()
for bc in boundaries:
bound_vec.append(bc)
# Prepare the return FieldLists.
vol = db.newFluidScalarFieldList(0.0, "volume")
surfacePoint = sph.IntFieldList()
cells = sph.FacetedVolumeFieldList()
cellFaceFlags = db.newGlobalvector_of_CellFaceFlagFieldList(sph.vector_of_CellFaceFlag(), "face flags")
etaVoidPoints = db.newGlobalvector_of_VectorFieldList(eval("sph.vector_of_Vector%id()" % db.nDim), "eta void points")
# Initialize volume
massf = db.fluidMass
rhof = db.fluidMassDensity
numNodeLists = db.numFluidNodeLists
for k in xrange(numNodeLists):
n = massf[k].numInternalElements
for i in xrange(n):
assert massf(k,i) > 0.0, "Bad mass (%i,%i), %g" % (k, i, massf(k,i))
assert rhof(k,i) > 0.0, "Bad density (%i,%i), %g" % (k, i, rhof(k,i))
vol[k][i] = massf(k,i)/rhof(k,i)
# We let the C++ method do the heavy lifting.
iterations = sph.centroidalRelaxNodesImpl(db,
bounds,
holes,
W,
rhofunc,
gradrhofunc,
rhoConst,
useGradRhoFunc,
bound_vec,
maxIterations,
maxFracTol,
avgFracTol,
correctionOrder,
centroidFrac,
vol,
surfacePoint,
cells)
# Make a final call to computeVoronoiVolume to get the more expensive surfacePoint and cells fields.
surfacePoint = db.newFluidIntFieldList(0, "surface point")
deltaMedian = db.newFluidVectorFieldList(sph.Vector.zero, "delta medial position")
if tessellationFileName:
cells = db.newFluidFacetedVolumeFieldList(sph.FacetedVolume(), "cells")
sph.computeVoronoiVolume(db.fluidPosition,
db.fluidHfield,
db.connectivityMap(),
sph.SymTensorFieldList(), # no damage
bounds,
holes,
bound_vec,
sph.ScalarFieldList(), # no weights
surfacePoint,
vol,
deltaMedian,
etaVoidPoints,
cells,
cellFaceFlags)
# Update the masses using rho and volume.
rho = db.fluidMassDensity
for k, nodes in enumerate(db.fluidNodeLists()):
n = nodes.numInternalNodes
mass = nodes.mass()
for i in xrange(n):
assert vol(k,i) > 0.0
            mass[i] = vol(k,i)*rho(k,i)
# If requested, dump the final info to a diagnostic viz file.
if tessellationFileName and SpheralVoronoiSiloDump:
dumper = SpheralVoronoiSiloDump(baseFileName = tessellationFileName,
baseDirectory = tessellationBaseDir,
listOfFieldLists = [vol, surfacePoint, db.fluidMass, db.fluidMassDensity],
boundaries = boundaries,
cells = cells)
dumper.dump(0.0, iterations)
siloPointmeshDump(baseName = tessellationFileName + "_points",
fieldLists = [vol, surfacePoint, db.fluidMass, db.fluidMassDensity])
return vol, surfacePoint
```
#### File: src/NodeGenerators/DumpGzipFileNodeGenerator.py
```python
from math import *
import mpi, gzip
import Spheral
class DumpGzipFileNodeGenerator:
#---------------------------------------------------------------------------
# Constructor.
#---------------------------------------------------------------------------
def __init__(self,
nodeLists,
filename,
precision = 20,
serialize = False,
extraFields = [],
):
self.nodeLists = nodeLists
self.filename = filename
self.precision = "%" + "%i.%ie" % (precision + 3, precision)
self.serialize = serialize
self.extraFields = extraFields
self.delimiter = "$"
self.writing = (mpi.rank == 0 or not serialize)
# Some mappings to help us write out various data types.
self._toString = {int : self._int2String,
bool : self._bool2String,
float : self._float2String,
str : self._str2String,
type(Spheral.Vector1d()) : self._ContainerFloats2String,
type(Spheral.Tensor1d()) : self._ContainerFloats2String,
type(Spheral.SymTensor1d()) : self._ContainerFloats2String,
type(Spheral.Vector2d()) : self._ContainerFloats2String,
type(Spheral.Tensor2d()) : self._ContainerFloats2String,
type(Spheral.SymTensor2d()) : self._ContainerFloats2String,
type(Spheral.Vector3d()) : self._ContainerFloats2String,
type(Spheral.Tensor3d()) : self._ContainerFloats2String,
type(Spheral.SymTensor3d()) : self._ContainerFloats2String,
}
# Open the file.
if self.writing:
self.f = gzip.open(filename, "w")
else:
self.f = None
if isinstance(nodeLists[0], Spheral.NodeList1d):
SymTensorField = Spheral.SymTensorField1d
elif isinstance(nodeLists[0], Spheral.NodeList2d):
SymTensorField = Spheral.SymTensorField2d
elif isinstance(nodeLists[0], Spheral.NodeList3d):
SymTensorField = Spheral.SymTensorField3d
# Walk the NodeLists and write the standard fields.
for nodes in self.nodeLists:
self.writeField(nodes.positions(), nodes.name, "positions")
self.writeField(nodes.mass(), nodes.name, "mass")
self.writeField(nodes.massDensity(), nodes.name, "density")
self.writeField(nodes.velocity(), nodes.name, "velocity")
self.writeField(nodes.specificThermalEnergy(), nodes.name, "specificThermalEnergy")
Hinv2 = SymTensorField("Hinv2", nodes)
nodes.Hinverse(Hinv2)
for i in xrange(nodes.numInternalNodes):
thpt = (Hinv2[i]*Hinv2[i]).Symmetric()
Hinv2[i] = thpt
self.writeField(Hinv2, nodes.name, "Hinverse2")
# Add any extra fields requested.
for field in self.extraFields:
nodes = field.nodeList()
self.writeField(field, nodes.name, field.name)
return
#---------------------------------------------------------------------------
# Define our internal conversion functions.
#---------------------------------------------------------------------------
def _int2String(self, x):
return str(x)
def _bool2String(self, x):
return str(x)
def _float2String(self, x):
return self.precision % x
def _str2String(self, x):
return x
def _ContainerFloats2String(self, x):
result = ""
for xi in x:
result += self.precision % xi + " "
return result
#---------------------------------------------------------------------------
# Write a field in our format.
#---------------------------------------------------------------------------
def writeField(self,
field,
materialName,
fieldName):
print "Writing %s for %s." % (fieldName, materialName)
vals = list(field.internalValues())
n = len(vals)
if self.serialize:
n = mpi.allreduce(n, mpi.SUM)
if self.writing:
self.f.write(materialName + self.delimiter +
fieldName)
if self.serialize:
for sendProc in xrange(mpi.procs):
print "Collecting from ", sendProc
otherVals = mpi.bcast(vals, sendProc)
print " Received %i values" % len(otherVals)
if self.writing:
for x in otherVals:
self.f.write(self.delimiter + self._toString[type(x)](x))
else:
if self.writing:
for x in vals:
self.f.write(self.delimiter + self._toString[type(x)](x))
if self.writing:
self.f.write("\n")
return
```
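A minimal usage sketch for the dumper above (illustrative, not from the source; it assumes a Spheral run with a NodeList `nodes` already built, so it is not runnable standalone):
```python
# Hypothetical usage -- "nodes" is assumed to be an existing Spheral NodeList.
dumper = DumpGzipFileNodeGenerator(nodeLists = [nodes],
                                   filename = "dump.gz",
                                   serialize = True)
# writeField emits one "$"-delimited line per field:
#   <materialName>$<fieldName>$<value0>$<value1>$...
```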
#### File: src/NodeGenerators/GenerateNodeProfile.py
```python
from math import *
import mpi
import numpy as np
from NodeGeneratorBase import *
from Spheral import (Vector1d, Tensor1d, SymTensor1d,
Vector2d, Tensor2d, SymTensor2d, rotationMatrix2d, testPointInBox2d,
Vector3d, Tensor3d, SymTensor3d, rotationMatrix3d, testPointInBox3d)
from SpheralTestUtilities import fuzzyEqual
#-------------------------------------------------------------------------------
# Class to generate 1-D node positions for a fixed node mass to fit the given
# density profile in a range (xmin, xmax).
#-------------------------------------------------------------------------------
class GenerateNodeProfile1d(NodeGeneratorBase):
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self,
nx, # number of points to generate
rho, # density profile
xmin,
xmax,
nNodePerh = 2.01,
numbins = 10000):
assert nx > 0
assert xmin < xmax
assert nNodePerh > 0.0
# If the user provided a constant for rho, then use the constantRho
# class to provide this value.
        if isinstance(rho, (float, int)):
self.rhofunc = ConstantRho(rho)
# In the constant rho case, no need to kill ourselves figuring out complicated fits...
dx = (xmax - xmin)/nx
mi = dx*rho
self.x = [xmin + (i+0.5)*dx for i in xrange(nx)]
self.H = [SymTensor1d(1.0/(nNodePerh*dx)) for i in xrange(nx)]
self.m = [mi]*nx
self.rho = [rho]*nx
else:
self.rhofunc = rho
# Build the evenly sampled cumulative mass as a function of position.
ok = False
while not ok:
dx = (xmax - xmin)/numbins
mcum = np.cumsum(np.array([0.0] + [0.5*dx*(self.rhofunc(xmin + i*dx) + self.rhofunc(xmin + (i + 1)*dx)) for i in xrange(numbins)]))
# Find the target mass per node.
mi = mcum[-1]/nx
# Do we need to have a finer binning?
if mcum[-1]/mi > 0.5*numbins:
numbins = int(2*mcum[-1]/mi)
print "Warning, boosting numbins to %i to increase mass resolution for interpolation" % numbins
else:
ok = True
# Now go through and bisect for positions to get the mass per point we want.
xi = xmin
self.x = []
self.rho = []
mtarget = -0.5*mi
while xi < xmax:
mtarget += mi
if mtarget <= mcum[-1]:
i = np.searchsorted(mcum, mtarget) - 1
assert mtarget >= mcum[i] and mtarget <= mcum[i+1]
xi = xmin + (i + (mtarget - mcum[i])/(mcum[i+1] - mcum[i]))*dx
assert (xi >= xmin + i*dx) and (xi <= xmin + (i+1)*dx)
self.x.append(xi)
self.rho.append(self.rhofunc(xi))
else:
xi = xmax
n = len(self.x)
print "Generated %i 1D points." % n
self.m = [mi]*n
# Figure out the H.
self.H = []
for i in xrange(n):
if i == 0:
dxavg = self.x[i+1] - self.x[i]
elif i == n-1:
dxavg = self.x[i] - self.x[i-1]
else:
dxavg = 0.5*(self.x[i+1] - self.x[i-1])
self.H.append(SymTensor1d(1.0/(nNodePerh*dxavg)))
# Have the base class break up the serial node distribution
# for parallel cases.
NodeGeneratorBase.__init__(self, True,
self.x, self.m, self.rho, self.H)
return
#---------------------------------------------------------------------------
# Get the position for the given node index.
#---------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
return Vector1d(self.x[i])
#---------------------------------------------------------------------------
# Get the mass for the given node index.
#---------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#---------------------------------------------------------------------------
# Get the mass density for the given node index.
#---------------------------------------------------------------------------
def localMassDensity(self, i):
assert i >= 0 and i < len(self.x)
return self.rho[i]
#---------------------------------------------------------------------------
# Get the H tensor for the given node index.
#---------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
#-------------------------------------------------------------------------------
# Similarly generate a 1D profile in 2D along the x-direction.
#-------------------------------------------------------------------------------
class GeneratePlanarNodeProfile2d(NodeGeneratorBase):
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self,
nx, # target number of points in x
ny, # target number of points in y
rho, # density profile, must be 1D function
xmin, # (xmin, ymin) coordinates
xmax, # (xmax, ymax) coordinates
nNodePerh = 2.01,
numbins = 10000,
SPH = False):
assert nx > 0
assert ny > 0
assert xmin[0] < xmax[0]
assert xmin[1] < xmax[1]
assert nNodePerh > 0.0
# First use the 1D generator to generate a 1D slice profile along x.
gen1d = GenerateNodeProfile1d(nx = nx,
rho = rho,
xmin = xmin[0],
xmax = xmax[0],
nNodePerh = nNodePerh,
numbins = numbins)
# Stitch the 1D profiles back into serial data.
gen1d.x = mpi.allreduce(gen1d.x, mpi.SUM)
gen1d.m = mpi.allreduce(gen1d.m, mpi.SUM)
gen1d.rho = mpi.allreduce(gen1d.rho, mpi.SUM)
gen1d.H = mpi.allreduce(gen1d.H, mpi.SUM)
n1d = len(gen1d.x)
# Replicate the 1D slices into the full 2D data.
self.x = []
self.y = []
self.m = []
self.rho = []
self.H = []
dy = (xmax[1] - xmin[1])/ny
hyinv = 1.0/(nNodePerh*dy)
for iy in xrange(ny):
self.x += gen1d.x
self.y += [xmin[1] + (iy + 0.5)*dy]*n1d
self.m += [mi*(xmax[1] - xmin[1])/ny for mi in gen1d.m]
self.rho += gen1d.rho
self.H += [SymTensor2d(H1d.xx, 0.0, 0.0, hyinv) for H1d in gen1d.H]
# Have the base class break up the serial node distribution
# for parallel cases.
NodeGeneratorBase.__init__(self, True,
self.x, self.y, self.m, self.rho, self.H)
# If we're forcing round H tensors, do it.
if SPH:
self.makeHround()
return
#---------------------------------------------------------------------------
# Get the position for the given node index.
#---------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
assert len(self.x) == len(self.y)
return Vector2d(self.x[i], self.y[i])
#---------------------------------------------------------------------------
# Get the mass for the given node index.
#---------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#---------------------------------------------------------------------------
# Get the mass density for the given node index.
#---------------------------------------------------------------------------
def localMassDensity(self, i):
assert i >= 0 and i < len(self.x)
return self.rho[i]
#---------------------------------------------------------------------------
# Get the H tensor for the given node index.
#---------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
#-------------------------------------------------------------------------------
# Similarly generate a 1D profile in 3D along the x-direction.
#-------------------------------------------------------------------------------
class GeneratePlanarNodeProfile3d(NodeGeneratorBase):
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self,
nx, # target number of points in x
ny, # target number of points in y
nz, # target number of points in z
rho, # density profile, must be 1D function
xmin, # (xmin, ymin, zmin) coordinates
xmax, # (xmax, ymax, zmax) coordinates
nNodePerh = 2.01,
numbins = 10000,
SPH = False):
assert nx > 0
assert ny > 0
assert nz > 0
assert xmin[0] < xmax[0]
assert xmin[1] < xmax[1]
assert xmin[2] < xmax[2]
assert nNodePerh > 0.0
# First use the 1D generator to generate a 1D slice profile along x.
gen1d = GenerateNodeProfile1d(nx = nx,
rho = rho,
xmin = xmin[0],
xmax = xmax[0],
nNodePerh = nNodePerh,
numbins = numbins)
# Stitch the 1D profiles back into serial data.
gen1d.x = mpi.allreduce(gen1d.x, mpi.SUM)
gen1d.m = mpi.allreduce(gen1d.m, mpi.SUM)
gen1d.rho = mpi.allreduce(gen1d.rho, mpi.SUM)
gen1d.H = mpi.allreduce(gen1d.H, mpi.SUM)
n1d = len(gen1d.x)
# Replicate the 1D slices into the full 3D data.
self.x = []
self.y = []
self.z = []
self.m = []
self.rho = []
self.H = []
dy = (xmax[1] - xmin[1])/ny
dz = (xmax[2] - xmin[2])/nz
hyinv = 1.0/(nNodePerh*dy)
hzinv = 1.0/(nNodePerh*dz)
for iz in xrange(nz):
for iy in xrange(ny):
self.x += gen1d.x
self.y += [xmin[1] + (iy + 0.5)*dy]*n1d
self.z += [xmin[2] + (iz + 0.5)*dz]*n1d
self.m += [mi*(xmax[1] - xmin[1])*(xmax[2] - xmin[2])/(ny*nz) for mi in gen1d.m]
self.rho += gen1d.rho
self.H += [SymTensor3d(H1d.xx, 0.0, 0.0,
0.0, hyinv, 0.0,
0.0, 0.0, hzinv) for H1d in gen1d.H]
# Have the base class break up the serial node distribution
# for parallel cases.
NodeGeneratorBase.__init__(self, True,
self.x, self.y, self.z, self.m, self.rho, self.H)
# If we're forcing round H tensors, do it.
if SPH:
self.makeHround()
return
#---------------------------------------------------------------------------
# Get the position for the given node index.
#---------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
assert len(self.x) == len(self.y)
assert len(self.x) == len(self.z)
return Vector3d(self.x[i], self.y[i], self.z[i])
#---------------------------------------------------------------------------
# Get the mass for the given node index.
#---------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#---------------------------------------------------------------------------
# Get the mass density for the given node index.
#---------------------------------------------------------------------------
def localMassDensity(self, i):
assert i >= 0 and i < len(self.x)
return self.rho[i]
#---------------------------------------------------------------------------
# Get the H tensor for the given node index.
#---------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
```
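A short sketch of driving GenerateNodeProfile1d with a functional profile (illustrative; `rhoProfile` is a made-up example, not from the source):
```python
# Fit ~100 equal-mass points to a linear density ramp on (0, 1).
def rhoProfile(x):
    return 1.0 + 9.0*x         # density rises from 1 to 10 across the domain

gen = GenerateNodeProfile1d(nx = 100,
                            rho = rhoProfile,
                            xmin = 0.0,
                            xmax = 1.0)
# Since every point carries the same mass mi, points cluster where rho is high.
```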
#### File: src/NodeGenerators/QuaquaversalTiling3d.py
```python
from math import *
from NodeGeneratorBase import *
from Spheral import Vector3d
from Spheral import Tensor3d
from Spheral import SymTensor3d
from Spheral import pair_double_double
from Spheral import vector_of_int, vector_of_double, vector_of_SymTensor3d, vector_of_vector_of_double
from SpheralTestUtilities import *
import numpy as np
import mpi
procID = mpi.rank
nProcs = mpi.procs
#-------------------------------------------------------------------------------
# Class to generate 3-D node positions in a QVT fashion
# This is a direct port of Steen Hansen's QVT code. See Hansen et al. 2007
#-------------------------------------------------------------------------------
class QuaquaversalTiling3d(NodeGeneratorBase):
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self,
n = 100,
xmin = 0.0,
xmax = 1.0,
rho = 1.0,
nNodePerh = 2.0,
offset=None,
rejecter=None,
maxLevel=6):
        self.maxLevel = maxLevel
        self.xmin = xmin
        self.xmax = xmax
        self.rho0 = rho     # constant density, returned by localMassDensity
self.corners = []
self.qua = []
self.sph = []
self.x = []
self.y = []
self.z = []
self.m = []
self.H = []
self.mm = np.array([
[[1.0,0.0,0.0,0.0],
[0.5,0.5,0.0,0.0],
[0.5,0.0,0.5,0.0],
[0.5,0.0,0.0,0.5]],
[[0.0,1.0,0.0,0.0],
[0.5,0.5,0.0,0.0],
[0.5,0.5,-0.5,0.5],
[0.5,0.0,0.0,0.5]],
[[0.5, 0.5, -0.5, 0.5],
[0., 1., -0.5, 0.5],
[0., 1., 0., 0.],
[0., 0.5, 0.5, 0.]],
[[0.5, 0., 0.5, 0.],
[0., 0.5, 0.5, 0.],
[0., 0., 1., 0.],
[0., 0., 0.5, 0.5]],
[[1., 0., -0.5, 0.5],
[0.5, 0.5, -0.5, 0.5],
[0.5, 0., 0., 0.5],
[0.5, 0., -0.5, 1.]],
[[0., 1., -1., 1.],
[0.5, 0.5, -1., 1.],
[0.5, 0., -0.5, 1.],
[0.5, 0., 0., 0.5]],
[[0., 0., 0., 1.],
[0.25, 0.5, -0.75, 1.],
[0.5, 0., -0.5, 1.],
[0.5, 0., 0., 0.5]],
[[0., 0., 0.5, 0.5],
[0.25, 0.5, -0.25, 0.5],
[0., 1., -0.5, 0.5],
[0., 1., -1., 1.]]])
self.fixpt = np.array([0.4285714, 0.2857143, 0.1428571])
vec = np.array([0., - sqrt(3.), 0.,
0., 0., 0.,
0., 0., 1.,
-1., 0., 1.])
level = 0
ii = 0
        n = self.checkNorm(ii)
dx = (xmax-xmin)
vo = dx**3
nd = n/vo
m0 = rho/nd
vi = 1.0/nd
hi = 2.0*nNodePerh*pow(3.0/(4.0*pi)*vi,1.0/3.0)
Hi = SymTensor3d(1.0/hi, 0.0, 0.0,
0.0, 1.0/hi, 0.0,
0.0, 0.0, 1.0/hi)
        vec = self.moveCenter(vec)
        #print vec
        vec = self.recurse(level,ii,vec)
#print self.sph
for i in xrange(len(self.sph)):
self.x.append(self.scale(self.sph[i][0]))
self.y.append(self.scale(self.sph[i][1]))
self.z.append(self.scale(self.sph[i][2]))
self.m.append(m0)
self.H.append(Hi)
# Initialize the base class. If "serialInitialization" is True, this
# is where the points are broken up between processors as well.
serialInitialization = True
NodeGeneratorBase.__init__(self, serialInitialization,
self.x, self.y, self.z, self.m, self.H)
return
#---------------------------------------------------------------------------
# Get the position for the given node index.
#---------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
assert len(self.x) == len(self.y) == len(self.z)
return Vector3d(self.x[i], self.y[i], self.z[i])
#---------------------------------------------------------------------------
# Get the mass for the given node index.
#---------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#---------------------------------------------------------------------------
# Get the mass density for the given node index.
#---------------------------------------------------------------------------
def localMassDensity(self, i):
        assert i >= 0 and i < len(self.x)
        # This generator seeds a constant density, so simply return it.
        return self.rho0
#---------------------------------------------------------------------------
# Get the H tensor for the given node index.
#---------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
def checkNorm(self,ii):
n = pow(8,self.maxLevel)
print "This will produce %e points" % n
for i in xrange(8):
for j in xrange(4):
b = 0
for k in xrange(4):
b = b + self.mm[i][j][k]
if (b!=1):
print "b = %f,%i,%i" %(b,i,j)
return n
def moveCenter(self,vec):
for i in xrange(4):
j = i*3
print "%f %f %f" % (vec[j],vec[j+1],vec[j+2])
for j in xrange(4):
i = j*3
vec[i] += (1.0-self.fixpt[0])
vec[i+1] += sqrt(3.0)*self.fixpt[1]
vec[i+2] += -self.fixpt[2]
return vec
def savePositions(self,vec):
for i in xrange(4):
self.corners.append([vec[i*3],vec[i*3+1],vec[i*3+2]])
self.corners.append([vec[6],vec[7],vec[8]])
self.corners.append([vec[0],vec[1],vec[2]])
return
def writeCenter(self,vec):
cntr = np.zeros(3)
for i in xrange(3):
cntr[i] = vec[3+i] + (1.0-self.fixpt[0])*(vec[9+i]-vec[6+i])
cntr[i] += self.fixpt[1]*(vec[i]-vec[3+i])
cntr[i] += self.fixpt[2]*(vec[6+i]-vec[3+i])
        rcut = 0.14    # renamed from "range" to avoid shadowing the builtin
        if ((abs(cntr[0]) < rcut) and (abs(cntr[1]) < rcut) and (abs(cntr[2]) < rcut)):
            self.qua.append([cntr[0],cntr[1],cntr[2]])
        radius = 0.0
        for i in xrange(3):
            radius += cntr[i]*cntr[i]
        radius = sqrt(radius)
        if (radius < rcut):
self.sph.append([cntr[0],cntr[1],cntr[2]])
return
def recurse(self,level,ii,vec):
vec2 = np.zeros(12)
if (level == self.maxLevel):
self.writeCenter(vec)
return vec
else:
level += 1
for ii in xrange(8):
for i in xrange(12):
vec2[i] = 0
for i in xrange(4):
for k in xrange(3):
for j in xrange(4):
vec2[k+3*i] += self.mm[ii][i][j] * vec[k+3*j]
                vec2 = self.recurse(level,ii,vec2)
return vec2
def scale(self,x):
ymin = -0.14
ymax = 0.14
return (ymax-ymin)/(self.xmax-self.xmin)*(x-self.xmin) + ymin
```
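A usage sketch (illustrative, not from the source): the recursion visits 8**maxLevel candidate centers, and writeCenter keeps only those inside radius 0.14 before scale() rescales the coordinates.
```python
# Hypothetical: a level-4 tiling of the unit box.
gen = QuaquaversalTiling3d(xmin = 0.0, xmax = 1.0, rho = 1.0, maxLevel = 4)
```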
#### File: src/NodeGenerators/RecursivePrimitiveRefinementGenerator.py
```python
from math import *
from NodeGeneratorBase import *
from Spheral import Vector3d
from Spheral import Tensor3d
from Spheral import SymTensor3d
from Spheral import vector_of_int, vector_of_double, vector_of_SymTensor3d, vector_of_vector_of_double
import mpi
rank = mpi.rank
procs = mpi.procs
class RPRPSGenerator3d(NodeGeneratorBase):
def __init__(self,n,densityProfileMethod,
rmin = 0.0,
rmax = 0.0,
thetaMin = 0.0,
thetaMax = pi,
phiMin = 0.0,
phiMax = 2.0*pi,
nNodePerh = 2.01,
offset = None,
rejecter = None,
m0 = 0.0):
assert n > 0
assert rmin < rmax
assert thetaMin < thetaMax
assert thetaMin >= 0.0 and thetaMin <= 2.0*pi
assert thetaMax >= 0.0 and thetaMax <= 2.0*pi
assert phiMin < phiMax
assert phiMin >= 0.0 and phiMin <= 2.0*pi
assert phiMax >= 0.0 and phiMax <= 2.0*pi
assert nNodePerh > 0.0
assert offset is None or len(offset)==3
self.rejecter = None
if rejecter:
self.rejecter = rejecter
import random
if offset is None:
self.offset = Vector3d(0,0,0)
else:
self.offset = Vector3d(offset[0],offset[1],offset[2])
self.n = n
self.rmin = rmin
self.rmax = rmax
self.thetaMin = thetaMin
self.thetaMax = thetaMax
self.phiMin = phiMin
self.phiMax = phiMax
self.nNodePerh = nNodePerh
# If the user provided a constant for rho, then use the constantRho
# class to provide this value.
        if isinstance(densityProfileMethod, (float, int)):
self.densityProfileMethod = ConstantRho(densityProfileMethod)
else:
self.densityProfileMethod = densityProfileMethod
# Determine how much total mass there is in the system.
self.totalMass = self.integrateTotalMass(self.densityProfileMethod,
rmin, rmax,
thetaMin, thetaMax,
phiMin, phiMax)
print "Total mass of %g in the range r = (%g, %g), theta = (%g, %g), phi = (%g, %g)" % \
(self.totalMass, rmin, rmax, thetaMin, thetaMax, phiMin, phiMax)
# Now set the nominal mass per node.
if (m0 == 0.0):
self.m0 = self.totalMass/n
else:
self.m0 = m0
n = int(self.totalMass/self.m0)
assert self.m0 > 0.0
print "Nominal mass per node of %g for %d nodes." % (self.m0,n)
from Spheral import SymTensor3d
self.x = []
self.y = []
self.z = []
self.m = []
self.H = []
ri = rmax
# new formula for calculating number of points for a given subdivision level
# (Nf * Np(n) - Ne * Npe(n) + Nc)
# Nf = Number of faces of primitive shape
# Np(n) = Number of points in a triangle subdivided n times
# 2^(2n-1) + 3*2^(n-1) + 1
# Ne = Number of edges of primitive shape
# Npe(n) = Number of points along an edge of primitive shape subdivided n times
# 2^n + 1
# Nc = Number of corners
# shapeData = [Nf,Ne,Nc]
shapeData = [[ 6, 9, 5],
[ 8,12, 6],
[12,18, 8],
[20,30,12]]
# first column is total number of shell points
# second column is number of refinements to reach that shell count
# third column is shape choice that reaches that shell count
resolution = [[5,0,0],
[6,0,1],
[8,0,2],
[12,0,3],
[14,1,0],
[18,1,1],
[26,1,2],
[42,1,3],
[50,2,0],
[66,2,1],
[98,2,2],
[162,2,3],
[194,3,0],
[258,3,1],
[386,3,2],
[642,3,3],
[770,4,0],
[1026,4,1],
[1538,4,2],
[2562,4,3],
[3074,5,0],
[4098,5,1],
[6146,5,2],
[10242,5,3],
[12290,6,0],
[16386,6,1],
[24578,6,2],
[40962,6,3],
[49154,7,0],
[65538,7,1],
[98306,7,2],
[163842,7,3],
[196610,8,0],
[262146,8,1],
[393218,8,2]]
while ri > rmin:
# create the database of faces and positions
self.positions = [] # [index,[point]]
self.middlePoints = [] # [i,[key,index]]
self.faces = []
self.index = 0
# Get the nominal delta r, number of nodes,
# and mass per node at this radius.
rhoi = self.densityProfileMethod(ri)
dr = pow(self.m0/(rhoi),1.0/3.0)
#dr = min(dr,ri-rmin)
rii = abs(ri - 0.5*dr)
# now compute a new dr based on rii
# this should in theory protect against the half bin radius being
# below rmin while not sacrificing the mass of the entire shell
# with a simple if condition
rhoi = self.densityProfileMethod(rii)
dr = pow(self.m0/rhoi,1.0/3.0)
#mshell = rhoi * 4.0*pi*ri*ri*dr
mshell = self.integrateTotalMass(self.densityProfileMethod,
ri-dr, ri,
0, pi,
0, 2*pi)
nshell = int(mshell / self.m0+0.5)
nshell = max(nshell,1)
nr = 0
ver = 0
counts = []
hi = nNodePerh*(dr)
Hi = SymTensor3d(1.0/hi, 0.0, 0.0,
0.0, 1.0/hi, 0.0,
0.0, 0.0, 1.0/hi)
mi = mshell / float(nshell)
random.seed(nshell)
dt = random.random()*pi
dt2 = random.random()*pi
rot = [[1.0,0.0,0.0],[0.0,cos(dt),-sin(dt)],[0.0,sin(dt),cos(dt)]]
rot2 = [[cos(dt2),0.0,sin(dt2)],[0.0,1.0,0.0],[-sin(dt2),0.0,cos(dt2)]]
if (nshell > 4 and nshell<163):
if (mpi.rank == 0):
for i in xrange(len(shapeData)):
nc = 0
nco = 0
nrf = 0
while (nc < nshell):
nrf += 1
nco = nc
nc = self.shapeCount(nrf,shapeData[i])
counts.append([i,nrf-1,nco])
counts.append([i,nrf,nc])
diff = 1e13
for i in xrange(len(counts)):
dd = abs(counts[i][2] - nshell)
if (dd < diff):
diff = dd
ver = counts[i][0]
nr = counts[i][1]
if (nr<0):
nr = 0
if (ver==0):
self.createHexaSphere(nr)
elif (ver==1):
self.createOctaSphere(nr)
elif (ver==2):
self.createCubicSphere(nr)
else:
self.createIcoSphere(nr)
for n in xrange(len(self.positions)):
self.positions[n] = self.rotater(self.positions[n],rot,rot2)
elif(nshell==1 and mi> 0.5 * self.m0):
if (mpi.rank == 0):
if rejecter:
if rejecter.accept(0,0,0):
self.positions.append([0,0,0])
else:
self.positions.append([0,0,0])
elif(nshell==2):
if (mpi.rank == 0):
position1 = self.rotater([0,0,1],rot,rot2)
position2 = self.rotater([0,0,-1],rot,rot2)
if rejecter:
if rejecter.accept(rii*position1[0],rii*position1[1],rii*position1[2]):
self.positions.append(position1)
if rejecter.accept(rii*position2[0],rii*position2[1],rii*position2[2]):
self.positions.append(position2)
else:
self.positions.append(position1)
self.positions.append(position2)
elif(nshell==3):
if (mpi.rank == 0):
t = sqrt(3)/2.0
position1 = self.rotater([0,1,0],rot,rot2)
position2 = self.rotater([t,-0.5,0],rot,rot2)
position3 = self.rotater([-t,-0.5,0],rot,rot2)
if rejecter:
if rejecter.accept(rii*position1[0],rii*position1[1],rii*position1[2]):
self.positions.append(position1)
if rejecter.accept(rii*position2[0],rii*position2[1],rii*position2[2]):
self.positions.append(position2)
if rejecter.accept(rii*position3[0],rii*position3[1],rii*position3[2]):
self.positions.append(position3)
else:
self.positions.append(position1)
self.positions.append(position2)
self.positions.append(position3)
elif(nshell==4):
if (mpi.rank == 0):
t = sqrt(3.0)/3.0
position1 = self.rotater([t,t,t],rot,rot2)
position2 = self.rotater([t,-t,-t],rot,rot2)
position3 = self.rotater([-t,-t,t],rot,rot2)
position4 = self.rotater([-t,t,-t],rot,rot2)
if rejecter:
if rejecter.accept(rii*position1[0],rii*position1[1],rii*position1[2]):
self.positions.append(position1)
if rejecter.accept(rii*position2[0],rii*position2[1],rii*position2[2]):
self.positions.append(position2)
if rejecter.accept(rii*position3[0],rii*position3[1],rii*position3[2]):
self.positions.append(position3)
if rejecter.accept(rii*position4[0],rii*position4[1],rii*position4[2]):
self.positions.append(position4)
else:
self.positions.append(position1)
self.positions.append(position2)
self.positions.append(position3)
self.positions.append(position4)
elif(nshell>=163):
if (nshell > mpi.procs and mpi.procs > 1):
p = 0
npp = 0
if mpi.procs > 2:
npp = nshell/(mpi.procs -1)
else:
npp = (nshell/2) if (rank == 0) else (nshell - nshell/2)
print "npp = %d"%npp
if(rank>0 and rank*npp!=nshell):
imax = rank*npp + 1
for i in xrange(1,imax):
h = -1.0+(2.0*(i-1.0)/(nshell-1.0))
if(i>1 and i<nshell):
p = (p+3.8/sqrt(nshell)*1.0/sqrt(1.0-h*h))%(2.0*pi)
rankmin = rank*npp + 1
rankmax = ((rank+1)*npp + 1) if (rank != procs -1) else (nshell + 1)
for i in xrange(rankmin, rankmax):
h = -1.0+(2.0*(i-1.0)/(nshell-1.0))
t = acos(h)
if(i>1 and i<nshell):
p = (p+3.8/sqrt(nshell)*1.0/sqrt(1.0-h*h))%(2.0*pi)
else:
p = 0
x = sin(t)*cos(p)
y = sin(t)*sin(p)
z = cos(t)
position = self.rotater([x,y,z],rot,rot2)
x = position[0]
y = position[1]
z = position[2]
if rejecter:
if rejecter.accept(rii*x,rii*y,rii*z):
self.positions.append([x,y,z])
else:
self.positions.append([x,y,z])
else:
# let rank 0 do all the work
p = 0
if (mpi.rank == 0):
for i in xrange(1, nshell+1):
h = -1.0+(2.0*(i-1.0)/(nshell-1.0))
t = acos(h)
if(i>1 and i<nshell):
p = (p+3.8/sqrt(nshell)*1.0/sqrt(1.0-h*h))%(2.0*pi)
else:
p = 0
x = sin(t)*cos(p)
y = sin(t)*sin(p)
z = cos(t)
position = self.rotater([x,y,z],rot,rot2)
x = position[0]
y = position[1]
z = position[2]
if rejecter:
if rejecter.accept(rii*x,rii*y,rii*z):
self.positions.append([x,y,z])
else:
self.positions.append([x,y,z])
# now reduce some lengths for output
numNodes = mpi.allreduce(len(self.positions),mpi.SUM)
print "at r=%3.4g\t wanted %d;\t computed %d total nodes with\t mass=%3.4g" %(rii,nshell,numNodes,mi)
for n in xrange(len(self.positions)):
x = rii*self.positions[n][0]
y = rii*self.positions[n][1]
z = rii*self.positions[n][2]
if(nshell>1):
theta = acos(z/sqrt(x*x+y*y+z*z))
phi = atan2(y,x)
if (phi<0.0):
phi = phi + 2.0*pi
else:
theta = (thetaMax - thetaMin)/2.0
phi = (phiMax - phiMin)/2.0
if (theta<=thetaMax and theta>=thetaMin) and (phi<=phiMax and phi>=phiMin):
# run a final pass on the rejecter
if rejecter:
if rejecter.accept(x,y,z):
self.x.append(x)
self.y.append(y)
self.z.append(z)
self.m.append(mi)
self.H.append(SymTensor3d.one*(1.0/hi))
else:
self.x.append(x)
self.y.append(y)
self.z.append(z)
self.m.append(mi)
self.H.append(SymTensor3d.one*(1.0/hi))
ri = max(rmin, ri - dr)
# If requested, shift the nodes.
if offset:
for i in xrange(len(self.x)):
self.x[i] += offset[0]
self.y[i] += offset[1]
self.z[i] += offset[2]
print "Generated a total of %i nodes." % mpi.allreduce(len(self.x),mpi.SUM)
NodeGeneratorBase.__init__(self, False,
self.x, self.y, self.z, self.m, self.H)
return
def rotater(self,pos,rot1,rot2):
posp = [0,0,0]
for k in xrange(3):
for j in xrange(3):
posp[k] += pos[j]*rot1[k][j]
x = posp[0]
y = posp[1]
z = posp[2]
pos = [x,y,z]
posp= [0,0,0]
for k in xrange(3):
for j in xrange(3):
posp[k] += pos[j]*rot2[k][j]
return posp
#---------------------------------------------------------------------------
# Compute the number of vertices for a given shape at a specific refinement
# level.
# new formula for calculating number of points for a given subdivision level
# (Nf * Np(n) - Ne * Npe(n) + Nc)
# Nf = Number of faces of primitive shape
# Np(n) = Number of points in a triangle subdivided n times
# 2^(2n-1) + 3*2^(n-1) + 1
# Ne = Number of edges of primitive shape
# Npe(n) = Number of points along an edge of primitive shape subdivided n times
# 2^n + 1
# Nc = Number of corners
#---------------------------------------------------------------------------
def shapeCount(self, refinement, shape):
Nf = shape[0]
Ne = shape[1]
Nc = shape[2]
n = refinement
Npe = 2**n + 1
Np = 2**(2*n-1) + 3*(2**(n-1)) + 1
return (Nf * Np - Ne * Npe + Nc)
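    # Example check of the formula: for the icosahedron, shapeData is
    # [20, 30, 12].  At refinement n = 1:
    #   Npe = 2**1 + 1 = 3,  Np = 2**(2*1-1) + 3*2**(1-1) + 1 = 6,
    #   count = 20*6 - 30*3 + 12 = 42,
    # which matches the [42, 1, 3] row of the resolution table above.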
#---------------------------------------------------------------------------
# Get the position for the given node index.
#---------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
assert len(self.x) == len(self.y) == len(self.z)
return Vector3d(self.x[i], self.y[i], self.z[i])
#---------------------------------------------------------------------------
# Get the mass for the given node index.
#---------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#---------------------------------------------------------------------------
# Get the mass density for the given node index.
#---------------------------------------------------------------------------
def localMassDensity(self, i):
        loc = self.localPosition(i) - self.offset
        return self.densityProfileMethod(loc.magnitude())
#---------------------------------------------------------------------------
# Get the H tensor for the given node index.
#---------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
#---------------------------------------------------------------------------
# Numerically integrate the given density profile to determine the total
# enclosed mass.
#---------------------------------------------------------------------------
def integrateTotalMass(self, densityProfileMethod,
rmin, rmax,
thetaMin, thetaMax,
phiMin, phiMax,
nbins = 10000):
assert nbins > 0
assert nbins % 2 == 0
result = 0
dr = (rmax-rmin)/nbins
nbp = nbins/procs
binmin = nbp*rank if (rank!=0) else (1)
        binmax = nbp*(rank+1) if (rank!=procs-1) else (nbins+1)   # +1 so the final bin i = nbins is integrated
for i in xrange(binmin,binmax):
r1 = rmin + (i-1)*dr
r2 = rmin + i*dr
result += 0.5*dr*(r2*r2*densityProfileMethod(r2)+r1*r1*densityProfileMethod(r1))
result = result * (phiMax-phiMin) * (cos(thetaMin)-cos(thetaMax))
result = mpi.allreduce(result,mpi.SUM)
return result
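    # Sanity check on the quadrature above: with a constant rho over the
    # full sphere (thetaMin=0, thetaMax=pi, phiMin=0, phiMax=2*pi) the
    # radial sum approximates rho*(rmax**3 - rmin**3)/3, and the angular
    # factor is 2*pi*2, giving (4/3)*pi*rho*(rmax**3 - rmin**3) as expected.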
#---------------------------------------------------------------------------
# Mechanics for creating and refining the icosahedron
#---------------------------------------------------------------------------
def addVertex(self,point):
length = sqrt(point[0]*point[0] + point[1]*point[1] + point[2]*point[2])
self.positions.append([point[0]/length,point[1]/length,point[2]/length])
self.index = self.index + 1
return self.index
def checkMiddlePoint(self,key):
exists = 0
myidx = 0
for i in xrange(len(self.middlePoints)):
if (self.middlePoints[i][0] == key):
exists = 1
myidx = self.middlePoints[i][1]
return exists, myidx
def getMiddlePoint(self,p1,p2):
firstIsSmaller = (p1<p2)
smallerIndex = 0
greaterIndex = 0
if firstIsSmaller:
smallerIndex = p1
greaterIndex = p2
else:
smallerIndex = p2
greaterIndex = p1
        key = smallerIndex * (10**10) + greaterIndex  # unique integer key for the edge (exact, unlike the float 1e10)
# check if this key already exists in middlepoints
exists, idx = self.checkMiddlePoint(key)
if (exists):
return idx
# otherwise, not already cached, time to add one
point1 = self.positions[p1]
point2 = self.positions[p2]
middle = [(point1[0]+point2[0])/2.0,(point1[1]+point2[1])/2.0,(point1[2]+point2[2])/2.0]
idx = self.addVertex(middle)
self.middlePoints.append([key,idx-1])
return idx-1
def createIcoSphere(self,np):
n = 0
t = (1.0+sqrt(5.0))/2.0
# create 12 vertices of an icosahedron
self.addVertex([-1, t, 0])
self.addVertex([ 1, t, 0])
self.addVertex([-1,-t, 0])
self.addVertex([ 1,-t, 0])
self.addVertex([ 0,-1, t])
self.addVertex([ 0, 1, t])
self.addVertex([ 0,-1,-t])
self.addVertex([ 0, 1,-t])
self.addVertex([ t, 0,-1])
self.addVertex([ t, 0, 1])
self.addVertex([-t, 0,-1])
self.addVertex([-t, 0, 1])
# create the 20 initial faces
# 5 faces around point 0
self.faces.append([ 0,11, 5])
self.faces.append([ 0, 5, 1])
self.faces.append([ 0, 1, 7])
self.faces.append([ 0, 7,10])
self.faces.append([ 0,10,11])
# 5 adjacent faces
self.faces.append([ 1, 5, 9])
self.faces.append([ 5,11, 4])
self.faces.append([11,10, 2])
self.faces.append([10, 7, 6])
self.faces.append([ 7, 1, 8])
# 5 faces around point 3
self.faces.append([ 3, 9, 4])
self.faces.append([ 3, 4, 2])
self.faces.append([ 3, 2, 6])
self.faces.append([ 3, 6, 8])
self.faces.append([ 3, 8, 9])
# 5 adjacent faces
self.faces.append([ 4, 9, 5])
self.faces.append([ 2, 4,11])
self.faces.append([ 6, 2,10])
self.faces.append([ 8, 6, 7])
self.faces.append([ 9, 8, 1])
# now refine triangles until you're done
for i in xrange(np):
faces2 = []
for j in xrange(len(self.faces)):
x,y,z = self.faces[j][0], self.faces[j][1], self.faces[j][2]
a = self.getMiddlePoint(x,y)
b = self.getMiddlePoint(y,z)
c = self.getMiddlePoint(z,x)
faces2.append([x,a,c])
faces2.append([y,b,a])
faces2.append([z,c,b])
faces2.append([a,b,c])
self.faces = faces2
n = len(self.positions)
def createOctaSphere(self,np):
n = 0
t = sqrt(2.0)/2.0
# create the 6 vertices of the octahedron
self.addVertex([ 0, 0, 1])
self.addVertex([ t, t, 0])
self.addVertex([ t,-t, 0])
self.addVertex([-t,-t, 0])
self.addVertex([-t, t, 0])
self.addVertex([ 0, 0,-1])
# create the 8 initial faces
# 4 faces around point 0
self.faces.append([ 0, 1, 2])
self.faces.append([ 0, 2, 3])
self.faces.append([ 0, 3, 4])
self.faces.append([ 0, 4, 1])
# 4 faces around point 5
self.faces.append([ 5, 2, 1])
self.faces.append([ 5, 3, 2])
self.faces.append([ 5, 4, 3])
self.faces.append([ 5, 1, 4])
# now refine triangles until you're done
for i in xrange(np):
faces2 = []
for j in xrange(len(self.faces)):
x,y,z = self.faces[j][0], self.faces[j][1], self.faces[j][2]
a = self.getMiddlePoint(x,y)
b = self.getMiddlePoint(y,z)
c = self.getMiddlePoint(z,x)
faces2.append([x,a,c])
faces2.append([y,b,a])
faces2.append([z,c,b])
faces2.append([a,b,c])
self.faces = faces2
n = len(self.positions)
def createHexaSphere(self,np):
n = 0
t = sqrt(3.0)/2.0
        # create the 5 vertices of the "hexahedron" (a 6-faced triangular bipyramid)
self.addVertex([ 0, 0, 1])
self.addVertex([ 0, 1, 0])
self.addVertex([ t,-0.5,0])
self.addVertex([-t,-0.5,0])
self.addVertex([ 0, 0,-1])
# create the 6 initial faces
# 3 faces around point 0
self.faces.append([ 0, 1, 2])
self.faces.append([ 0, 2, 3])
self.faces.append([ 0, 3, 1])
# 3 faces around point 4
self.faces.append([ 4, 2, 1])
self.faces.append([ 4, 3, 2])
self.faces.append([ 4, 1, 3])
# now refine triangles until you're done
for i in xrange(np):
faces2 = []
for j in xrange(len(self.faces)):
x,y,z = self.faces[j][0], self.faces[j][1], self.faces[j][2]
a = self.getMiddlePoint(x,y)
b = self.getMiddlePoint(y,z)
c = self.getMiddlePoint(z,x)
faces2.append([x,a,c])
faces2.append([y,b,a])
faces2.append([z,c,b])
faces2.append([a,b,c])
self.faces = faces2
n = len(self.positions)
def createCubicSphere(self,np):
n = 0
t = sqrt(3.0)/3.0
# create the 8 vertices of the cube
self.addVertex([-t, t, t])
self.addVertex([-t,-t, t])
self.addVertex([ t,-t, t])
self.addVertex([ t, t, t])
self.addVertex([ t, t,-t])
self.addVertex([ t,-t,-t])
self.addVertex([-t,-t,-t])
self.addVertex([-t, t,-t])
        # create the 12 initial triangular faces (2 per cube face)
# 5 faces around point 0
self.faces.append([ 0, 4, 7])
self.faces.append([ 0, 1, 7])
self.faces.append([ 0, 1, 2])
self.faces.append([ 0, 2, 3])
self.faces.append([ 0, 3, 4])
# 5 faces around point 5
self.faces.append([ 5, 2, 3])
self.faces.append([ 5, 3, 4])
self.faces.append([ 5, 4, 7])
self.faces.append([ 5, 6, 7])
self.faces.append([ 5, 2, 6])
# 2 faces around point 1
self.faces.append([ 1, 6, 7])
self.faces.append([ 1, 2, 6])
# now refine triangles until you're done
for i in xrange(np):
faces2 = []
for j in xrange(len(self.faces)):
x,y,z = self.faces[j][0], self.faces[j][1], self.faces[j][2]
a = self.getMiddlePoint(x,y)
b = self.getMiddlePoint(y,z)
c = self.getMiddlePoint(z,x)
faces2.append([x,a,c])
faces2.append([y,b,a])
faces2.append([z,c,b])
faces2.append([a,b,c])
self.faces = faces2
n = len(self.positions)
```
#### File: src/NodeGenerators/SphericalGenerator.py
```python
def SphericalGenerator(generator):
# Correct the mass.
n = len(generator.m)
for i in xrange(n):
ri = generator.localPosition(i).x
assert ri > 0.0
generator.m[i] *= ri*ri
return generator
```
#### File: src/NodeGenerators/StretchedLattice2d.py
```python
from math import *
from NodeGeneratorBase import *
from Spheral import Vector2d
from Spheral import Tensor2d
from Spheral import SymTensor2d
from Spheral import pair_double_double
from Spheral import vector_of_int, vector_of_double, vector_of_SymTensor2d, vector_of_vector_of_double
from SpheralTestUtilities import *
from Spheral import PairScalarFunctor, newtonRaphsonFindRoot
from SpheralGnuPlotUtilities import multiSort
import mpi
procID = mpi.rank
nProcs = mpi.procs
#-------------------------------------------------------------------------------
# Class to generate 2-D node positions in a stretched lattice
#-------------------------------------------------------------------------------
class GenerateStretchedLattice2d(NodeGeneratorBase):
class nrFunc(PairScalarFunctor):
def __init__(self,k,rho0,r0,eta,rp,rho,dr):
PairScalarFunctor.__init__(self)
self.k = k
self.rho0 = rho0
self.r0 = r0
self.eta = eta
self.rp = rp
self.rho = rho
self.const = k*r0**(2)*rho0*eta
self.dr = dr
return
def __call__(self,x):
fst = (self.rho(x) - self.rho(x-self.dr))/self.dr * (x-self.rp)*x*x
scd = self.rho(x) * (x*x + (x-self.rp)*(2)*x*x)
return pair_double_double(((x-self.rp)*x**(2)*self.rho(x)-self.const),fst + scd)
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self, nr, densityProfileMethod,
rmin = 0.0,
rmax = 1.0,
thetaMin = 0.0,
thetaMax = pi,
nNodePerh = 2.01,
offset=None,
m0ForMassMatching=None):
assert nr > 0
assert rmin >= 0
assert rmin < rmax
assert thetaMin < thetaMax
assert thetaMin >= 0.0 and thetaMin <= 2.0*pi
assert thetaMax >= 0.0 and thetaMax <= 2.0*pi
assert nNodePerh > 0.0
assert offset is None or len(offset)==3
if offset is None:
self.offset = Vector2d(0,0)
else:
self.offset = Vector2d(offset[0],offset[1])
self.nr = nr
self.rmin = rmin
self.rmax = rmax
self.thetaMin = thetaMin
self.thetaMax = thetaMax
self.nNodePerh = nNodePerh
self.xmin = Vector2d(-2.0*rmax,-2.0*rmax)
self.xmax = Vector2d(2.0*rmax,2.0*rmax)
# no reason to support a constant density method here, just use a regular lattice for that
self.densityProfileMethod = densityProfileMethod
# Determine how much total mass there is in the system.
targetMass = self.integrateTotalMass(self.densityProfileMethod,
rmin, rmax,
thetaMin, thetaMax)
#targetMass = self.integrateTotalMass(self.densityProfileMethod,
# rmax)
targetN = pi*(nr**2)
self.m0 = targetMass/targetN
self.vol = pi*(rmax**2)
# what this means is this currently only supports creating a full sphere and then
# cutting out the middle to rmin if rmin > 0
if m0ForMassMatching is None:
self.rho0 = targetMass/self.vol
else:
self.m0 = m0ForMassMatching
self.rho0 = targetN*self.m0/self.vol
print "Found total mass = {0:3.3e} with rho0 = {1:3.3e}".format(targetMass,self.rho0)
# compute kappa first
# k = 3/(self.rho0*rmax**3) * targetMass/(4.0*pi)
# print "Found kappa={0:3.3f}. Was that what you expected?".format(k)
nlat = nr
# create the unstretched lattice
self.xl, self.yl, self.ml, self.Hl = \
self.latticeDistribution(nlat,
self.rho0,
self.m0,
self.xmin, # (xmin, ymin, zmin)
self.xmax, # (xmax, ymax, zmax)
self.rmax,
self.nNodePerh)
self.rl = []
for i in xrange(len(self.xl)):
self.rl.append(sqrt(self.xl[i]**2+self.yl[i]**2))
print "Sorting unstretched lattice... %d elements" % len(self.rl)
multiSort(self.rl,self.xl,self.yl)
self.x = []
self.y = []
self.m = []
self.H = []
nx = 2*nlat+1
eta = (self.xmax[0] - self.xmin[0])/nx
print "Stretching lattice..."
dr = eta * 0.01 # this will essentially be the error in the new dumb way
r0p = 0
rp = 0
rn = 0
for i in xrange(1,len(self.rl)):
#print "%d / %d" % (i,len(self.rl))
r0 = self.rl[i]
if (abs(r0-r0p)/r0>1e-10):
sol = r0**2*self.rho0/2.0
                nsteps = int(10*rmax // dr)   # renamed from "iter" to avoid shadowing the builtin
                fn = 0
                for j in xrange(nsteps+1):
rj = dr*j
rjj = dr*(j+1)
fj = rj * densityProfileMethod(rj)
fjj = rjj * densityProfileMethod(rjj)
fn = fn + 0.5*(fj+fjj)*dr
if (fn>=sol):
rn = rj
break
r0p = r0
if (rn <= rmax and rn > rmin):
self.x.append(self.xl[i] * rn/r0)
self.y.append(self.yl[i] * rn/r0)
self.m.append(self.ml[i])
self.H.append(self.Hl[i])
seededMass = sum(self.m)
mAdj = targetMass / seededMass
for i in xrange(len(self.m)):
self.m[i] = self.m[i] * mAdj
# Initialize the base class. If "serialInitialization" is True, this
# is where the points are broken up between processors as well.
serialInitialization = True
NodeGeneratorBase.__init__(self, serialInitialization,
self.x, self.y, self.m, self.H)
return
#---------------------------------------------------------------------------
# Get the position for the given node index.
#---------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
assert len(self.x) == len(self.y)
return Vector2d(self.x[i], self.y[i])
#---------------------------------------------------------------------------
# Get the mass for the given node index.
#---------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#---------------------------------------------------------------------------
# Get the mass density for the given node index.
#---------------------------------------------------------------------------
def localMassDensity(self, i):
        loc = self.localPosition(i) - self.offset
        return self.densityProfileMethod(loc.magnitude())
#---------------------------------------------------------------------------
# Get the H tensor for the given node index.
#---------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
#---------------------------------------------------------------------------
# Numerically integrate the given density profile to determine the total
# enclosed mass.
#---------------------------------------------------------------------------
def integrateTotalMass(self, densityProfileMethod,
rmin, rmax,
thetaMin, thetaMax,
nbins = 10000):
assert nbins > 0
assert nbins % 2 == 0
result = 0
dr = (rmax-rmin)/nbins
        for i in xrange(1,nbins+1):   # include the final bin so the sum spans (rmin, rmax)
r1 = rmin + (i-1)*dr
r2 = rmin + i*dr
result += 0.5*dr*(r2*densityProfileMethod(r2)+r1*densityProfileMethod(r1))
result = result * (thetaMax-thetaMin)
return result
#-------------------------------------------------------------------------------
# Seed positions/masses on the unstretched lattice
#-------------------------------------------------------------------------------
def latticeDistribution(self, nr, rho0, m0,
xmin,
xmax,
rmax,
nNodePerh = 2.01):
assert nr > 0
assert rho0 > 0
nx = 2*nr+1
ny = 2*nr+1
dx = (xmax[0] - xmin[0])/nx
dy = (xmax[1] - xmin[1])/ny
n = nx*ny
hx = 1.0/(nNodePerh*dx)
hy = 1.0/(nNodePerh*dy)
H0 = SymTensor2d(hx, 0.0,
0.0, hy)
x = []
y = []
m = []
H = []
for j in xrange(ny):
for i in xrange(nx):
xx = xmin[0] + (i + 0.5)*dx
yy = xmin[1] + (j + 0.5)*dy
x.append(xx)
y.append(yy)
m.append(m0)
H.append(H0)
return x, y, m, H
```
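A usage sketch (illustrative; the profile is a made-up example, and recall the comment above that a constant density is deliberately unsupported here):
```python
# Hypothetical: stretch the lattice to fit a softened power-law profile.
def rhoOfR(r):
    return 1.0/(0.1 + r)

gen = GenerateStretchedLattice2d(nr = 50,
                                 densityProfileMethod = rhoOfR,
                                 rmin = 0.0,
                                 rmax = 1.0)
```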
#### File: src/NodeList/FluidNodeLists.py
```python
from SpheralCompiledPackages import *
from spheralDimensions import spheralDimensions
dims = spheralDimensions()
#-------------------------------------------------------------------------------
# The generic FluidNodeList pattern.
#-------------------------------------------------------------------------------
FluidNodeListFactoryString = """
def makeFluidNodeList%(dim)s(name,
eos,
numInternal = 0,
numGhost = 0,
hmin = 1.0e-20,
hmax = 1.0e20,
hminratio = 0.1,
nPerh = 2.01,
maxNumNeighbors = 500,
rhoMin = 1.0e-10,
rhoMax = 1e10,
# Neighboring stuff
NeighborType = TreeNeighbor%(dim)s,
searchType = GatherScatter,
kernelExtent = 2.0,
# Parameters only for NestedGridNeighbor (deprecated)
# numGridLevels = 31,
# topGridCellSize = 100.0,
# origin = Vector%(dim)s.zero,
# gridCellInfluenceRadius = 1,
# Parameters for TreeNeighbor
xmin = Vector%(dim)s.one * -10.0,
xmax = Vector%(dim)s.one * 10.0):
result = FluidNodeList%(dim)s(name, eos, numInternal, numGhost,
hmin, hmax, hminratio,
nPerh, maxNumNeighbors,
rhoMin, rhoMax)
if NeighborType == NestedGridNeighbor%(dim)s:
print "makeFluidNodeList Deprecation Warning: NestedGridNeighbor is deprecated: suggest using TreeNeighbor."
result._neighbor = NestedGridNeighbor%(dim)s(result, searchType,
kernelExtent = kernelExtent)
#numGridLevels, topGridCellSize,
#origin, kernelExtent,
#gridCellInfluenceRadius)
else:
result._neighbor = TreeNeighbor%(dim)s(result, searchType, kernelExtent, xmin, xmax)
result.registerNeighbor(result._neighbor)
result.eos = eos
return result
"""
#-------------------------------------------------------------------------------
# Create the different dimension implementations.
#-------------------------------------------------------------------------------
for dim in dims:
exec(FluidNodeListFactoryString % {"dim" : "%id" % dim})
```
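After the exec loop runs, one factory exists per compiled dimension. A call sketch for 2D (illustrative; "eos" stands in for a previously constructed equation of state):
```python
# Hypothetical usage of the generated factory.
nodes = makeFluidNodeList2d("fluid", eos,
                            numInternal = 10000,
                            nPerh = 2.01)
```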
#### File: Pybind11Wraps/ANEOS/ANEOSMOD.py
```python
from PYB11Generator import *
#from SpheralCommon import *
from spheralDimensions import *
dims = spheralDimensions()
from ANEOS import *
#-------------------------------------------------------------------------------
# Includes
#-------------------------------------------------------------------------------
PYB11includes = ['"Geometry/Dimension.hh"',
'"Material/PhysicalConstants.hh"',
'"Material/EquationOfState.hh"',
'"Field/Field.hh"',
'"SolidMaterial/ANEOS.hh"',
'"ANEOSWrappers.hh"',
'<vector>',
'<string>',
'<iterator>']
#-------------------------------------------------------------------------------
# Namespaces
#-------------------------------------------------------------------------------
PYB11namespaces = ["Spheral"]
#-------------------------------------------------------------------------------
# Functions
#-------------------------------------------------------------------------------
def initializeANEOS(in_filename = "std::string",
out_filename = "std::string",
izetl = "std::vector<int>"):
"""Initialize ANEOS with some rather arcane input.
in_filename : The name of the ANEOS input file, initializes the Fortran ANEOS library
    out_filename : An optional file to write any output from the ANEOS initialization call
izetl : An array of the material numbers ("EOS#" in the ANEOS input file)
Note, these numbers must be the negative of the "EOS#" in the input
"""
return "void"
#-------------------------------------------------------------------------------
# Instantiate our dimensional types
#-------------------------------------------------------------------------------
for ndim in dims:
exec('''
ANEOS%(ndim)id = PYB11TemplateClass(ANEOS, template_parameters="%(Dimension)s")
''' % {"ndim" : ndim,
"Dimension" : "Dim<" + str(ndim) + ">"})
```
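Expanded by hand, the instantiation loop above produces, e.g. for ndim = 3:
```python
ANEOS3d = PYB11TemplateClass(ANEOS, template_parameters="Dim<3>")
```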
#### File: Pybind11Wraps/ArtificialViscosity/ArtificialViscosityAbstractMethods.py
```python
from PYB11Generator import *
@PYB11ignore
class ArtificialViscosityAbstractMethods:
@PYB11const
def Piij(self,
nodeListi = "const unsigned",
i = "const unsigned",
nodeListj = "const unsigned",
j = "const unsigned",
xi = "const Vector&",
etai = "const Vector&",
vi = "const Vector&",
rhoi = "const Scalar",
csi = "const Scalar",
Hi = "const SymTensor&",
xj = "const Vector&",
etaj = "const Vector&",
vj = "const Vector&",
rhoj = "const Scalar",
csj = "const Scalar",
Hj = "const SymTensor&"):
"Require all descendents to return the artificial viscous Pi = P/rho^2 as a tensor. Scalar viscosities should just return a diagonal tensor with their value along the diagonal."
return "std::pair<Tensor, Tensor>"
```
#### File: Pybind11Wraps/Boundary/PeriodicBoundary.py
```python
from PYB11Generator import *
from Boundary import *
from PlanarBoundary import *
from BoundaryAbstractMethods import *
from RestartMethods import *
@PYB11template("Dimension")
class PeriodicBoundary(PlanarBoundary):
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename %(Dimension)s::ThirdRankTensor ThirdRankTensor;
typedef typename %(Dimension)s::FourthRankTensor FourthRankTensor;
typedef typename %(Dimension)s::FifthRankTensor FifthRankTensor;
typedef typename %(Dimension)s::FacetedVolume FacetedVolume;
typedef GeomPlane<%(Dimension)s> Plane;
"""
#...........................................................................
# Constructors
def pyinit(self):
"Default constructor"
def pyinit1(self,
plane1 = "const Plane&",
plane2 = "const Plane&"):
"Construct a periodic boundary mapping between the two (enter/exit) planes"
#...........................................................................
# Methods
@PYB11virtual
def cullGhostNodes(self,
flagSet = "const FieldList<%(Dimension)s, int>&",
old2newIndexMap = "FieldList<%(Dimension)s, int>&",
numNodesRemoved = "std::vector<int>&"):
"Use a set of flags to cull out inactive ghost nodes."
return "void"
@PYB11virtual
def reset(self,
dataBase = "const DataBase<%(Dimension)s>&"):
"Overridable hook for clearing out the boundary condition."
return "void"
@PYB11virtual
@PYB11const
def label(self):
"Label for restart files"
return "std::string"
#............................................................................
@PYB11pycppname("applyGhostBoundary")
@PYB11const
def applyGhostBoundary0(self,
fieldBase = "FieldBase<%(Dimension)s>&"):
"Apply the boundary condition to the ghost node values in the given Field."
return "void"
@PYB11pycppname("applyGhostBoundary")
@PYB11virtual
@PYB11const
def applyGhostBoundary9(self,
field = "Field<%(Dimension)s, FacetedVolume>&"):
"Apply the boundary condition to the ghost node values in the given Field."
return "void"
#............................................................................
@PYB11pycppname("enforceBoundary")
@PYB11virtual
@PYB11const
def enforceBoundary9(self,
field = "Field<%(Dimension)s, FacetedVolume>&"):
"Apply the boundary condition to the violation node values in the given Field."
return "void"
#...........................................................................
# Properties
enter = PYB11property("const Plane&", "enterPlane", "setEnterPlane", doc="The first plane for periodic wrapping")
exit = PYB11property("const Plane&", "exitPlane", "setExitPlane", doc="The second plane for periodic wrapping")
#-------------------------------------------------------------------------------
# Inject methods
#-------------------------------------------------------------------------------
PYB11inject(BoundaryAbstractMethods, PeriodicBoundary, virtual=True, pure_virtual=False)
```
#### File: Pybind11Wraps/Boundary/PlanarBoundary.py
```python
from PYB11Generator import *
from Boundary import *
from RestartMethods import *
@PYB11template("Dimension")
@PYB11module("SpheralBoundary")
class PlanarBoundary(Boundary):
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename %(Dimension)s::ThirdRankTensor ThirdRankTensor;
typedef typename %(Dimension)s::FourthRankTensor FourthRankTensor;
typedef typename %(Dimension)s::FifthRankTensor FifthRankTensor;
typedef typename %(Dimension)s::FacetedVolume FacetedVolume;
typedef typename Boundary<%(Dimension)s>::BoundaryNodes BoundaryNodes;
typedef GeomPlane<%(Dimension)s> Plane;
"""
#...........................................................................
# Constructors
def pyinit(self):
"Default constructor"
def pyinit1(self,
enterPlane = "const Plane&",
exitPlane = "const Plane&"):
"Construct with enter/exit mapping planes"
#...........................................................................
# Virtual methods
@PYB11virtual
@PYB11const
def enterPlane(self):
"Get the enter plane"
return "const Plane&"
@PYB11virtual
def setEnterPlane(self, plane="const Plane&"):
"Set the enter plane"
return "void"
@PYB11virtual
@PYB11const
def exitPlane(self):
"Get the exit plane"
return "const Plane&"
@PYB11virtual
def setExitPlane(self, plane="const Plane&"):
"Set the exit plane"
return "void"
@PYB11virtual
@PYB11const
def valid(self):
return "bool"
@PYB11virtual
@PYB11const
def clip(self,
xmin = "Vector&",
xmax = "Vector&"):
"Override the boundary clip"
return "void"
#...........................................................................
# Methods
def setGhostNodes(self,
nodeList = "NodeList<%(Dimension)s>&",
presetControlNodes = "const std::vector<int>&"):
"Set the ghost nodes for a predefined set of control nodes"
return "void"
@PYB11const
def mapPosition(self,
position = "const Vector&",
enterPlane = "const Plane&",
exitPlane = "const Plane&"):
"Map a position through the enter/exit plane transformation"
return "Vector"
@PYB11const
def facesOnPlane(self,
mesh = "const Mesh<%(Dimension)s>&",
plane = "const Plane&",
tol = "const Scalar"):
"Provide a method to identify tessellation faces on a plane."
return "std::vector<unsigned>"
#-------------------------------------------------------------------------------
# Inject restart methods
#-------------------------------------------------------------------------------
PYB11inject(BoundaryAbstractMethods, PlanarBoundary, virtual=True, pure_virtual=False)
PYB11inject(RestartMethods, PlanarBoundary)
```
#### File: Pybind11Wraps/CRKSPH/CRKSPHVariant.py
```python
from PYB11Generator import *
from CRKSPHHydroBase import *
@PYB11template("Dimension")
@PYB11module("SpheralCRKSPH")
@PYB11dynamic_attr
class CRKSPHVariant(CRKSPHHydroBase):
"CRKSPHVariant -- A development variant of CRKSPH for experimentation."
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename %(Dimension)s::ThirdRankTensor ThirdRankTensor;
typedef typename %(Dimension)s::FourthRankTensor FourthRankTensor;
typedef typename %(Dimension)s::FifthRankTensor FifthRankTensor;
typedef typename Physics<%(Dimension)s>::TimeStepType TimeStepType;
"""
def pyinit(self,
smoothingScaleMethod = "const SmoothingScaleBase<%(Dimension)s>&",
Q = "ArtificialViscosity<%(Dimension)s>&",
W = "const TableKernel<%(Dimension)s>&",
WPi = "const TableKernel<%(Dimension)s>&",
filter = "const double",
cfl = "const double",
useVelocityMagnitudeForDt = "const bool",
compatibleEnergyEvolution = "const bool",
evolveTotalEnergy = "const bool",
XSPH = "const bool",
densityUpdate = "const MassDensityType",
HUpdate = "const HEvolutionType",
correctionOrder = "const RKOrder",
volumeType = "const RKVolumeType",
epsTensile = "const double",
nTensile = "const double",
limitMultimaterialTopology = "const bool"):
"Constructor"
#...........................................................................
# Virtual methods
@PYB11virtual
def initializeProblemStartup(self, dataBase = "DataBase<%(Dimension)s>&"):
"Tasks we do once on problem startup."
return "void"
@PYB11virtual
def initialize(self,
time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Initialize the Hydro before we start a derivative evaluation."
return "void"
@PYB11virtual
@PYB11const
def evaluateDerivatives(self,
time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "const State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"""Evaluate the derivatives for the principle hydro variables:
mass density, velocity, and specific thermal energy."""
return "void"
```
#### File: Pybind11Wraps/Distributed/SortAndDivideRedistributeNodes.py
```python
from PYB11Generator import *
from RedistributeNodes import *
@PYB11template("Dimension")
class SortAndDivideRedistributeNodes(RedistributeNodes):
"""SortAndDivideRedistributeNodes -- (Re)domain decompose the nodes by using
a sort and divide algorithm.
This is a template base class -- the actual dimension dependent objects
are:
SortAndDivideRedistributeNodes1d
SortAndDivideRedistributeNodes2d
SortAndDivideRedistributeNodes3d"""
PYB11typedefs = """
typedef typename KeyTraits::Key Key;
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
"""
#...........................................................................
# Constructors
def pyinit(self,
Hextent = "const double"):
"Constructor"
#...........................................................................
# Methods
@PYB11const
def sortByPositions(self,
domainNodes = "std::list<DomainNode<%(Dimension)s> >&",
positionIndex = "const int"):
"""Sort the given set of DomainNodes by a position component (where positionIndex
maps as
0 ==> x,
1 ==> y,
2 ==> z),
and return the result as a list<DomainNode>."""
return "void"
@PYB11const
def popFrontNodes(self,
sortedCandidateNodes = "std::list<DomainNode<%(Dimension)s> >&",
targetDomainWork = "const double",
positionIndex = "const int"):
"""Given a sorted list of domain nodes, pop nodes off of the front building
a return list until the requested work is met."""
return "std::list<DomainNode<%(Dimension)s> >"
@PYB11const
def shapeTensor(self,
domainNodes = "const std::vector<DomainNode<%(Dimension)s> >&"):
"Compute the appropriate shape tensor for a set of domain nodes."
return "typename SymTensor::EigenStructType"
@PYB11const
def rotateIntoShapeTensorFrame(self,
shapeTensor = "const typename SymTensor::EigenStructType&",
domainNodes = "std::vector<DomainNode<%(Dimension)s> >&"):
"""Apply the necessary rotation to the positions of the domain nodes to transform into
the primary frame of the given shape tensor."""
return "void"
@PYB11const
def reduceDomainNodes(self,
nodes = "const std::vector<DomainNode<%(Dimension)s> >&",
targetProc = "const int"):
"Reduce a vector<DomainNode> to the given processor."
return "std::vector<DomainNode<%(Dimension)s> >"
@PYB11const
def broadcastDomainNodes(self,
nodes = "const std::vector<DomainNode<%(Dimension)s> >&",
sendProc = "const int"):
"Broadcast a vector<DomainNode> from the given processor."
return "std::vector<DomainNode<%(Dimension)s> >"
#...........................................................................
# Properties
Hextent = PYB11property("double", "Hextent", "Hextent")
```
#### File: Pybind11Wraps/Field/Field.py
```python
import inspect
from PYB11Generator import *
from FieldBase import FieldBase
#-------------------------------------------------------------------------------
# Field
#-------------------------------------------------------------------------------
@PYB11template("Dimension", "Value")
@PYB11module("SpheralField")
class Field(FieldBase):
PYB11typedefs = """
typedef Field<%(Dimension)s, %(Value)s> FieldType;
"""
def pyinit(self, name="std::string"):
"Construct with a name"
def pyinit1(self, name="std::string", nodeList="const FieldType&"):
"Construct as a copy of a Field with a new name"
def pyinit2(self, name="std::string", nodeList="const NodeList<%(Dimension)s>&"):
"Construct with a name and NodeList"
def pyinit3(self,
name = "std::string",
nodeList = "const NodeList<%(Dimension)s>&",
val = "%(Value)s"):
"Construct with a name, NodeList, and initial value"
# def pyinit4(self, rhs="const PYB11TrampolineField<%(Dimension)s, %(Value)s>&"):
# "Copy constructor"
#...........................................................................
# Comparators
    def __eq__(self):
        return
    def __ne__(self):
        return
    @PYB11pycppname("__eq__")
    def eqValue(self, rhs="%(Value)s()"):
        "Equivalence comparison with a %(Value)s"
        return "bool"
    @PYB11pycppname("__ne__")
    def neValue(self, rhs="%(Value)s()"):
        "Not equal comparison with a %(Value)s"
        return "bool"
#...........................................................................
# Sequence methods
@PYB11cppname("size")
@PYB11const
def __len__(self):
return "unsigned"
@PYB11cppname("operator[]")
@PYB11returnpolicy("reference_internal")
@PYB11implementation('[](FieldType& self, int i) { const int n = self.size(); if (i >= n) throw py::index_error(); return &self[(i %% n + n) %% n]; }')
def __getitem__(self):
#return "%(Value)s&"
return
@PYB11implementation("[](FieldType& self, int i, const %(Value)s v) { const int n = self.size(); if (i >= n) throw py::index_error(); self[(i %% n + n) %% n] = v; }")
def __setitem__(self):
"Set a value"
@PYB11implementation("[](const FieldType& self) { return py::make_iterator(self.begin(), self.end()); }, py::keep_alive<0,1>()")
def __iter__(self):
"Python iteration through a Field."
@PYB11returnpolicy("reference_internal")
@PYB11implementation("[](FieldType& self, int i) { const int n = self.size(); if (i >= n) throw py::index_error(); return &self[(i %% n + n) %% n]; }")
def __call__(self):
"Index into a Field"
#return "%(Value)s&"
return
#...........................................................................
# FieldBase virtual methods
@PYB11virtual
@PYB11const
def size(self):
"Number of elements"
return "unsigned"
@PYB11virtual
def Zero(self):
"Set all element values equal to zero"
return "void"
@PYB11virtual
def setNodeList(self, nodeList="const NodeList<%(Dimension)s>&"):
"Register this Field with the given NodeList"
return "void"
@PYB11virtual
def resizeField(self, size="unsigned"):
"Set the number of elements"
return "void"
@PYB11virtual
def resizeFieldInternal(self, size="unsigned", oldFirstGhostNode="unsigned"):
"Set the number of internal elements"
return "void"
@PYB11virtual
def resizeFieldGhost(self, size="unsigned"):
"Set the number of ghost elements"
return "void"
@PYB11virtual
def deleteElement(self, nodeID="int"):
"Delete the element at the given index"
return "void"
@PYB11virtual
def deleteElements(self, nodeIDs="const std::vector<int>&"):
"Delete the elements at the given indices"
return "void"
@PYB11virtual
@PYB11const
def packValues(self, nodeIDs="const std::vector<int>&"):
"Serialize the indicated elements into a vector<char>"
return "std::vector<char>"
@PYB11virtual
def unpackValues(self,
nodeIDs="const std::vector<int>&",
buffer = "const std::vector<char>&"):
"Deserialize values from the given buffer"
return "void"
@PYB11virtual
def copyElements(self,
fromIndices="const std::vector<int>&",
toIndices="const std::vector<int>&"):
"Copy a range of values from/to elements of the Field"
return "void"
#...........................................................................
# Methods
@PYB11const
@PYB11implementation("[](const Field<%(Dimension)s, %(Value)s>& self, const int precision) -> py::bytes { return py::bytes(self.string(precision)); }")
def string(self, precision=("const int", "20")):
"Serialize Field to a string"
return "py::bytes"
@PYB11pycppname("string")
@PYB11implementation("[](Field<%(Dimension)s, %(Value)s>& self, const py::bytes& buf) -> void { self.string(static_cast<std::string>(buf)); }")
def string1(self, buf="py::bytes&"):
"Deserialize from a string"
return "void"
@PYB11const
@PYB11implementation("[](const Field<%(Dimension)s, %(Value)s>& self) -> py::list { const auto vals = self.internalValues(); py::list result; for (const auto& x: vals) result.append(x); return result; }")
def internalValues(self):
"Return a python list (as a copy) of just the internal values"
return "py::list"
@PYB11const
@PYB11implementation("[](const Field<%(Dimension)s, %(Value)s>& self) -> py::list { const auto vals = self.ghostValues(); py::list result; for (const auto& x: vals) result.append(x); return result; }")
def ghostValues(self):
"Return a python list (as a copy) of just the ghost values"
return "py::list"
@PYB11const
@PYB11implementation("[](const Field<%(Dimension)s, %(Value)s>& self) -> py::list { const auto vals = self.allValues(); py::list result; for (const auto& x: vals) result.append(x); return result; }")
def allValues(self):
"Return a python list (as a copy) of all values in the Field"
return "py::list"
#...........................................................................
# Properties
numElements = PYB11property("unsigned", doc="Number of elements in field")
numInternalElements = PYB11property("unsigned", doc="Number of internal elements in field")
numGhostElements = PYB11property("unsigned", doc="Number of ghost elements in field")
```
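Field above is a two-parameter template over ("Dimension", "Value"); its concrete instantiations live in a separate module file not shown here. As a hedged sketch of how they would be stamped out, following the same PYB11TemplateClass pattern used for FourthRankTensor later in this section (the names below are illustrative assumptions, not the actual Spheral instantiation file):
```python
# Illustrative sketch only: pyname/cppname values are assumptions.
ScalarField1d = PYB11TemplateClass(Field,
                                   template_parameters = ("Dim<1>", "Dim<1>::Scalar"),
                                   cppname = "Field<Dim<1>, Dim<1>::Scalar>",
                                   pyname = "ScalarField1d")
VectorField1d = PYB11TemplateClass(Field,
                                   template_parameters = ("Dim<1>", "Dim<1>::Vector"),
                                   cppname = "Field<Dim<1>, Dim<1>::Vector>",
                                   pyname = "VectorField1d")
```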
#### File: Pybind11Wraps/FieldList/FieldListBase.py
```python
from PYB11Generator import *
#-------------------------------------------------------------------------------
# FieldBase
#-------------------------------------------------------------------------------
@PYB11template("Dimension")
@PYB11module("SpheralField")
class FieldListBase:
"Base class for FieldLists -- not much to implement in Python."
# def pyinit(self, name="std::string"):
# "Construct with a name"
# def pyinit1(self, name="std::string", nodeList="const NodeList<%(Dimension)s>&"):
# "Construct with a name and NodeList"
```
#### File: Pybind11Wraps/FileIO/FileIO.py
```python
from PYB11Generator import *
from FileIOAbstractMethods import *
from FileIOTemplateMethods import *
from spheralDimensions import *
dims = spheralDimensions()
@PYB11module("SpheralFileIO")
class FileIO:
"Abstract base class for FileIO objects"
#...........................................................................
# Constructors
def pyinit0(self):
"Default constructor"
def pyinit1(self,
filename = "const std::string",
access = "AccessType"):
"Construct with a given file name and access"
#...........................................................................
# Abstract methods
@PYB11pure_virtual
def open(self,
fileName = "const std::string",
access = "AccessType"):
"Open a file for IO"
return "void"
@PYB11pure_virtual
def close(self):
"Close the current file we're pointing at"
return "void"
#...........................................................................
# Virtual methods
@PYB11virtual
@PYB11noconvert
def write_unsigned_int(self,
value = "const unsigned int",
pathName = "const std::string"):
"Write an unsigned int"
return "void"
@PYB11virtual
@PYB11noconvert
def write_size_t(self,
value = "const size_t",
pathName = "const std::string"):
"Write a size_t"
return "void"
@PYB11virtual
@PYB11noconvert
def write_int(self,
value = "const int",
pathName = "const std::string"):
"Write an int"
return "void"
@PYB11virtual
@PYB11noconvert
def write_bool(self,
value = "const bool",
pathName = "const std::string"):
"Write a bool"
return "void"
@PYB11virtual
@PYB11noconvert
def write_double(self,
value = "const double",
pathName = "const std::string"):
"Write a double"
return "void"
@PYB11virtual
@PYB11noconvert
def write_string(self,
value = "const std::string",
pathName = "const std::string"):
"Write a std::string"
return "void"
@PYB11virtual
@PYB11const
@PYB11noconvert
def read_unsigned_int(self,
pathName = "const std::string"):
"Read an unsigned int"
return "unsigned int"
@PYB11virtual
@PYB11const
@PYB11noconvert
def read_size_t(self,
pathName = "const std::string"):
"Read a size_t"
return "size_t"
@PYB11virtual
@PYB11const
@PYB11noconvert
def read_int(self,
pathName = "const std::string"):
"Read an int"
return "int"
@PYB11virtual
@PYB11const
@PYB11noconvert
def read_bool(self,
pathName = "const std::string"):
"Read a bool"
return "bool"
@PYB11virtual
@PYB11const
@PYB11noconvert
def read_double(self,
pathName = "const std::string"):
"Read a double"
return "double"
@PYB11virtual
@PYB11const
@PYB11noconvert
def read_string(self,
pathName = "const std::string"):
"Read a std::string"
return "std::string"
#...........................................................................
# Methods
for ndim in dims:
for ttype in ("Scalar",
"Vector",
"Tensor",
"SymTensor",
"ThirdRankTensor"):
exec('''
@PYB11pycppname("write")
@PYB11virtual
@PYB11noconvert
def write%(ttype)sFL%(ndim)i(self,
value = "const FieldList<Dim<%(ndim)i>, Dim<%(ndim)i>::%(ttype)s>&",
pathName = "const std::string"):
"Write FieldList<Dim<%(ndim)i, %(ttype)s>"
return "void"
@PYB11pycppname("read")
@PYB11virtual
@PYB11const
@PYB11noconvert
def read%(ttype)sFL%(ndim)i(self,
value = "FieldList<Dim<%(ndim)i>, Dim<%(ndim)i>::%(ttype)s>&",
pathName = "const std::string"):
"Read FieldList<Dim<%(ndim)i, %(ttype)s>"
return "void"
@PYB11pycppname("write")
@PYB11virtual
@PYB11noconvert
def write%(ttype)sFV%(ndim)i(self,
value = "const Field<Dim<%(ndim)i>, std::vector<Dim<%(ndim)i>::%(ttype)s>>&",
pathName = "const std::string"):
"Write Field<Dim<%(ndim)i, vector<%(ttype)s>>"
return "void"
@PYB11pycppname("read")
@PYB11virtual
@PYB11const
@PYB11noconvert
def read%(ttype)sFV%(ndim)i(self,
value = "Field<Dim<%(ndim)i>, std::vector<Dim<%(ndim)i>::%(ttype)s>>&",
pathName = "const std::string"):
"Read Field<Dim<%(ndim)i, vector<%(ttype)s>>"
return "void"
''' % {"ndim" : ndim,
"ttype" : ttype})
#......................................................................
exec('''
@PYB11pycppname("write")
@PYB11virtual
@PYB11noconvert
def writeintFL%(ndim)i(self,
value = "const FieldList<Dim<%(ndim)i>, int>&",
pathName = "const std::string"):
"Write FieldList<Dim<%(ndim)i, int>"
return "void"
@PYB11pycppname("read")
@PYB11virtual
@PYB11const
@PYB11noconvert
def readintFL%(ndim)i(self,
value = "FieldList<Dim<%(ndim)i>, int>&",
pathName = "const std::string"):
"Read FieldList<Dim<%(ndim)i, int>"
return "void"
@PYB11pycppname("write")
@PYB11virtual
@PYB11noconvert
def writeunsignedFL%(ndim)i(self,
value = "const FieldList<Dim<%(ndim)i>, unsigned>&",
pathName = "const std::string"):
"Write FieldList<Dim<%(ndim)i, unsigned>"
return "void"
@PYB11pycppname("read")
@PYB11virtual
@PYB11const
@PYB11noconvert
def readunsignedFL%(ndim)i(self,
value = "FieldList<Dim<%(ndim)i>, unsigned>&",
pathName = "const std::string"):
"Read FieldList<Dim<%(ndim)i, unsigned>"
return "void"
@PYB11pycppname("write")
@PYB11virtual
@PYB11noconvert
def writeintFV%(ndim)i(self,
value = "const Field<Dim<%(ndim)i>, std::vector<int>>&",
pathName = "const std::string"):
"Write Field<Dim<%(ndim)i, vector<int>>"
return "void"
@PYB11pycppname("read")
@PYB11virtual
@PYB11const
@PYB11noconvert
def readintFV%(ndim)i(self,
value = "Field<Dim<%(ndim)i>, std::vector<int>>&",
pathName = "const std::string"):
"Read Field<Dim<%(ndim)i, vector<int>>"
return "void"
''' % {"ndim" : ndim})
#...........................................................................
    for ndim in range(1, 4): # These dimensional methods are always supported
exec('''
@PYB11pycppname("write")
@PYB11noconvert
def writePlane%(ndim)i(self,
value = "const GeomPlane<Dim<%(ndim)i>>&",
pathName = "const std::string"):
"Write a Plane%(ndim)id"
return "void"
@PYB11pycppname("read")
@PYB11const
@PYB11noconvert
def readPlane%(ndim)i(self,
value = "GeomPlane<Dim<%(ndim)i>>&",
pathName = "const std::string"):
"Read a Plane%(ndim)id"
return "void"
''' % {"ndim" : ndim})
@PYB11pycppname("write")
def write_uniform_random(self,
value = "const uniform_random&",
pathName = "const std::string"):
"Write random number generator uniform_random"
return "void"
@PYB11pycppname("read")
@PYB11const
def read_uniform_random(self,
value = "uniform_random&",
pathName = "const std::string"):
"Read random number generator uniform_random"
return "void"
@PYB11const
def splitPathComponents(self, pathName="const std::string"):
"A helper function to split a string up into substrings delimited by '/'."
return "std::vector<std::string>"
@PYB11const
def groupName(self, pathName="const std::string"):
"Return the group (directory) component of a path."
return "std::string"
@PYB11const
def variableName(self, pathName="const std::string"):
"Return the variable component of a path."
return "std::string"
@PYB11implementation("[](FileIO& self, py::handle thing, py::handle path) { self.writeObject(thing.ptr(), path.ptr()); }")
@PYB11noconvert
def writeObject(self,
thing = "py::handle",
path = "py::handle"):
"Handle a generic python object through serialization"
return "void"
@PYB11returnpolicy("take_ownership")
@PYB11const
@PYB11implementation("[](FileIO& self, py::handle path) { return py::handle(self.readObject(path.ptr())); }")
@PYB11noconvert
def readObject(self,
path = "py::handle"):
"Return a generic python object from deserialization."
return "py::handle"
#...........................................................................
# Properties
fileName = PYB11property("const std::string&", "fileName", doc="The current file name")
access = PYB11property("AccessType", "access", doc="The access type of the currently open file")
fileOpen = PYB11property("bool", "fileOpen", doc="Is the file currently open?")
#-------------------------------------------------------------------------------
# Inject the abstract interface
#-------------------------------------------------------------------------------
PYB11inject(FileIOAbstractMethods, FileIO, virtual=False, pure_virtual=True)
PYB11inject(FileIOTemplateMethods, FileIO)
```
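For reference, one pass of the exec loops inside FileIO above (ndim=1, ttype="Scalar") expands, after %-substitution, into ordinary PYB11 declarations. A sketch of the generated text follows, wrapped in a toy class since in the real file it lands in the FileIO class body:
```python
# Expansion sketch of one exec iteration (ndim=1, ttype="Scalar"); the loop
# above already generates these declarations, so this is for reference only.
from PYB11Generator import *
class FileIOSketch:
    @PYB11pycppname("write")
    @PYB11virtual
    @PYB11noconvert
    def writeScalarFL1(self,
                       value = "const FieldList<Dim<1>, Dim<1>::Scalar>&",
                       pathName = "const std::string"):
        "Write FieldList<Dim<1>, Scalar>"
        return "void"
```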
#### File: Pybind11Wraps/FileIO/SiloFileIO.py
```python
from PYB11Generator import *
from FileIO import *
from FileIOAbstractMethods import *
from FileIOTemplateMethods import *
from spheralDimensions import *
dims = spheralDimensions()
class SiloFileIO(FileIO):
"Handle FileIO for silo files"
#...........................................................................
# Constructors
def pyinit0(self):
"Default constructor"
def pyinit1(self,
filename = "const std::string",
access = "AccessType"):
"Open a silo file with a given file name and access"
#...........................................................................
# Override abstract methods
@PYB11virtual
def open(self,
fileName = "const std::string",
access = "AccessType"):
"Open a file for IO"
return "void"
@PYB11virtual
def close(self):
"Close the current file we're pointing at"
return "void"
#-------------------------------------------------------------------------------
# Override the required virtual interface
#-------------------------------------------------------------------------------
PYB11inject(FileIOAbstractMethods, SiloFileIO, virtual=True, pure_virtual=False)
#PYB11inject(FileIOTemplateMethods, SiloFileIO)
```
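FileIO.py injects FileIOAbstractMethods with pure_virtual=True to declare the abstract interface, while SiloFileIO above injects the same methods with virtual=True to bind its concrete overrides. A minimal self-contained sketch of this PYB11inject pattern, using invented toy names rather than Spheral's:
```python
# Toy illustration of the PYB11inject pattern; all names here are invented.
from PYB11Generator import *
@PYB11ignore
class IOInterface:
    def open(self, fileName = "const std::string"):
        return "void"
class AbstractIO:
    "Toy abstract base: receives the interface as pure virtual declarations"
class ConcreteIO(AbstractIO):
    "Toy concrete subclass: receives the same interface as virtual overrides"
PYB11inject(IOInterface, AbstractIO, virtual=False, pure_virtual=True)
PYB11inject(IOInterface, ConcreteIO, virtual=True, pure_virtual=False)
```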
#### File: Pybind11Wraps/FSISPH/SolidFSISPHHydroBase.py
```python
from PYB11Generator import *
from SolidSPHHydroBase import *
from RestartMethods import *
@PYB11template("Dimension")
@PYB11module("SpheralFSISPH")
class SolidFSISPHHydroBase(SolidSPHHydroBase):
"SolidFSISPHHydroBase -- SolidSPHHydro modified for large density discontinuities"
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename Physics<%(Dimension)s>::TimeStepType TimeStepType;
"""
def pyinit(smoothingScaleMethod = "const SmoothingScaleBase<%(Dimension)s>&",
dataBase = "DataBase<%(Dimension)s>&",
Q = "ArtificialViscosity<%(Dimension)s>&",
slides = "SlideSurface<%(Dimension)s>&",
W = "const TableKernel<%(Dimension)s>&",
filter = "const double",
cfl = "const double",
surfaceForceCoefficient = "const double",
densityStabilizationCoefficient = "const double",
specificThermalEnergyDiffusionCoefficient = "const double",
xsphCoefficient = "const double",
interfaceMethod = "const InterfaceMethod",
kernelAveragingMethod = "const KernelAveragingMethod",
sumDensityNodeLists = "std::vector<int>",
useVelocityMagnitudeForDt = "const bool",
compatibleEnergyEvolution = "const bool",
evolveTotalEnergy = "const bool",
gradhCorrection = "const bool",
XSPH = "const bool",
correctVelocityGradient = "const bool",
densityUpdate = "const MassDensityType",
HUpdate = "const HEvolutionType",
epsTensile = "const double",
nTensile = "const double",
damageRelieveRubble = "const bool",
strengthInDamage = "const bool",
xmin = "const Vector&",
xmax = "const Vector&"):
"SolidFSISPHHydroBase constructor"
#...........................................................................
# Virtual methods
@PYB11virtual
@PYB11const
def evaluateDerivatives(time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "const State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"""Evaluate the derivatives for the principle hydro
mass density, velocity, and specific thermal energy."""
return "void"
@PYB11virtual
def initializeProblemStartup(dataBase = "DataBase<%(Dimension)s>&"):
"register the surface normals w/ the database"
return "void"
@PYB11virtual
def initialize(time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"calculates surface normals"
return "void"
@PYB11virtual
def registerState(dataBase = "DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&"):
"register the surface normals"
return "void"
@PYB11virtual
def registerDerivatives(dataBase = "DataBase<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"non-op place filler"
return "void"
#...........................................................................
# Properties
DepsDx = PYB11property("const FieldList<%(Dimension)s, Vector>&", "DepsDx", returnpolicy="reference_internal")
DPDx = PYB11property("const FieldList<%(Dimension)s, Vector>&", "DPDx", returnpolicy="reference_internal")
rawPressure = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "rawPressure", returnpolicy="reference_internal")
interfaceNormals = PYB11property("const FieldList<%(Dimension)s, Vector>&", "interfaceNormals", returnpolicy="reference_internal")
interfaceFraction = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "interfaceFraction", returnpolicy="reference_internal")
interfaceSmoothness = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "interfaceSmoothness", returnpolicy="reference_internal")
newInterfaceNormals = PYB11property("const FieldList<%(Dimension)s, Vector>&", "newInterfaceNormals", returnpolicy="reference_internal")
newInterfaceFraction = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "newInterfaceFraction", returnpolicy="reference_internal")
newInterfaceSmoothness = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "newInterfaceSmoothness", returnpolicy="reference_internal")
slideSurfaces = PYB11property("SlideSurface<%(Dimension)s>&", "slideSurface", doc="The slide surface object")
    surfaceForceCoefficient = PYB11property("double", "surfaceForceCoefficient", "surfaceForceCoefficient", doc="additional force between different materials a la Monaghan 2013.")
    densityStabilizationCoefficient = PYB11property("double", "densityStabilizationCoefficient", "densityStabilizationCoefficient", doc="coefficient used to adjust the velocity gradient to prevent unstable density.")
    specificThermalEnergyDiffusionCoefficient = PYB11property("double", "specificThermalEnergyDiffusionCoefficient", "specificThermalEnergyDiffusionCoefficient", doc="coefficient used to diffuse specificThermalEnergy amongst like nodes.")
    xsphCoefficient = PYB11property("double", "xsphCoefficient", "xsphCoefficient", doc="coefficient to dial the magnitude of XSPH.")
    sumDensityNodeLists = PYB11property("std::vector<int>", "sumDensityNodeLists", "sumDensityNodeLists", doc="control whether the rigorous density sum is applied to individual node lists.")
    interfaceMethod = PYB11property("InterfaceMethod", "interfaceMethod", "interfaceMethod", doc="Flag to select how we want to construct material interfaces")
    kernelAveragingMethod = PYB11property("KernelAveragingMethod", "kernelAveragingMethod", "kernelAveragingMethod", doc="Flag to select our kernel type")
#-------------------------------------------------------------------------------
# Inject methods
#-------------------------------------------------------------------------------
PYB11inject(RestartMethods, SolidFSISPHHydroBase)
```
#### File: Pybind11Wraps/Geometry/Facet3d.py
```python
from PYB11Generator import *
@PYB11cppname("GeomFacet3d")
class Facet3d:
"""GeomFacet3d -- A facet of a polyhedron (triangular)
Note a Facet does not maintain it's own copies of its vertices -- the
assumption is that this is a Facet of a GeomPolyhedron and that polyhedron
owns the set of vertex positions."""
PYB11typedefs = """
typedef GeomFacet3d Facet3d;
typedef GeomFacet3d::Vector Vector;
"""
#...........................................................................
# Constructors
def pyinit(self):
"Default constructor"
def pyinit1(self,
vertices = "const std::vector<Vector>&",
ipoints = "const std::vector<unsigned>&"):
"Explicit constructor with vertices and point indices"
#...........................................................................
# Methods
@PYB11const
@PYB11pycppname("compare")
def compare0(self,
point = "const Vector&",
tol = ("const double tol", "1.0e-8")):
"""Is the given point above, below, or colinear with the facet?
1 => point above.
0 => point in plane of facet
-1 => points below."""
return "int"
@PYB11const
@PYB11pycppname("compare")
def compare1(self,
points = "const std::vector<Vector>&",
tol = ("const double tol", "1.0e-8")):
"""Compare a set of points:
1 => all points above.
0 => points both above and below (or equal).
-1 => all points below."""
return "int"
@PYB11const
@PYB11returnpolicy("reference_internal")
def point(self, index="const unsigned"):
return "const Vector&"
@PYB11const
def distance(self,
p = "const Vector&"):
"Compute the minimum distance from the facet to a point."
return "double"
@PYB11const
def closestPoint(self,
p = "const Vector&"):
"Compute the closest point on the facet to the given point."
return "Vector"
@PYB11const
def triangles(self):
"Split into triangular sub-facets."
return "std::vector<GeomFacet3d>"
#...........................................................................
# Comparisons
def __eq__(self):
return
def __ne__(self):
return
#...........................................................................
# Properties
ipoints = PYB11property("const std::vector<unsigned>&", returnpolicy="reference_internal")
normal = PYB11property("const Vector&", returnpolicy="reference_internal")
position = PYB11property("Vector")
area = PYB11property("double")
```
#### File: Pybind11Wraps/Geometry/FourthRankTensor.py
```python
from PYB11Generator import *
#-------------------------------------------------------------------------------
# FourthRankTensor template
#-------------------------------------------------------------------------------
@PYB11template("ndim")
class FourthRankTensor:
"Spheral fourth rank tensor (%(ndim)sx%(ndim)sx%(ndim)sx%(ndim)s) class"
# Static attributes
nrank = PYB11readonly(static=True, doc="Rank of the tensor", returnpolicy="copy")
nDimensions = PYB11readonly(static=True, doc="Number of dimensions", returnpolicy="copy")
numElements = PYB11readonly(static=True, doc="Number of elements stored in the type", returnpolicy="copy")
zero = PYB11readonly(static=True, doc="The zero value equivalent", returnpolicy="copy")
# Constructors
def pyinit0(self):
"Default constructor"
def pyinit1(self,
rhs = "const Dim<%(ndim)s>::FourthRankTensor"):
"Copy constructor"
def pyinit2(self,
rhs="double"):
"Construct setting the element values to a constant value."
# Sequence methods
@PYB11implementation("[](const Dim<%(ndim)s>::FourthRankTensor&) { return Dim<%(ndim)s>::FourthRankTensor::numElements; }")
def __len__(self):
"The size (number of elements) of the FourthRankTensor."
@PYB11implementation("[](const Dim<%(ndim)s>::FourthRankTensor &s, size_t i) { if (i >= Dim<%(ndim)s>::FourthRankTensor::numElements) throw py::index_error(); return s[i]; }")
@PYB11returnpolicy("reference_internal")
def __getitem__(self):
"Python indexing to get an element."
@PYB11implementation("[](Dim<%(ndim)s>::FourthRankTensor &s, size_t i, double v) { if (i >= Dim<%(ndim)s>::FourthRankTensor::numElements) throw py::index_error(); s[i] = v; }")
def __setitem__(self):
"Python indexing to set an element."
@PYB11implementation("[](const Dim<%(ndim)s>::FourthRankTensor &s) { return py::make_iterator(s.begin(), s.end()); }, py::keep_alive<0,1>()")
def __iter__(self):
"Python iteration through a FourthRankTensor."
@PYB11const
@PYB11returnpolicy("reference_internal")
def __call__(self,
i="Dim<%(ndim)s>::FourthRankTensor::size_type",
j="Dim<%(ndim)s>::FourthRankTensor::size_type",
k="Dim<%(ndim)s>::FourthRankTensor::size_type",
m="Dim<%(ndim)s>::FourthRankTensor::size_type"):
"Extract the (i,j,k,m) element."
return "double"
@PYB11pycppname("__call__")
@PYB11implementation("""[](Dim<%(ndim)s>::FourthRankTensor& self,
Dim<%(ndim)s>::FourthRankTensor::size_type i,
Dim<%(ndim)s>::FourthRankTensor::size_type j,
Dim<%(ndim)s>::FourthRankTensor::size_type k,
Dim<%(ndim)s>::FourthRankTensor::size_type m,
double val) { self(i,j,k,m) = val; }""")
def assignCall(self,
i="Dim<%(ndim)s>::FourthRankTensor::size_type",
j="Dim<%(ndim)s>::FourthRankTensor::size_type",
k="Dim<%(ndim)s>::FourthRankTensor::size_type",
m="Dim<%(ndim)s>::FourthRankTensor::size_type",
val="double"):
return "void"
# Methods
def Zero(self):
"Zero out the elements"
return "void"
@PYB11const
    def doubledot(self, rhs="const RankNTensor<%(ndim)s, 4, GeomFourthRankTensor<%(ndim)s>>&"):
return "double"
@PYB11const
def squareElements(self):
return "const Dim<%(ndim)s>::FourthRankTensor"
@PYB11const
def maxAbsElement(self):
return "double"
# Operators
def __neg__(self):
return
def __iadd__(self):
return
def __isub__(self):
return
def __add__(self):
return
def __sub__(self):
return
def __imul__(self, rhs="double()"):
return
def __idiv__(self, rhs="double()"):
return
def __mul__(self, rhs="double()"):
return
def __div__(self, rhs="double()"):
return
# Comparisons
def __eq__(self):
return
def __ne__(self):
return
def __lt__(self):
return
def __gt__(self):
return
def __le__(self):
return
def __ge__(self):
return
# String representation
@PYB11implementation("""
[](const Dim<%(ndim)s>::FourthRankTensor& self) {
std::string result = "FourthRankTensor" + std::to_string(%(ndim)s) + "d(";
for (auto val: self) result += (" " + std::to_string(val) + " ");
result += ")";
return result;
}""")
def __repr__(self):
return
#-------------------------------------------------------------------------------
# FourthRankTensor instantiations.
#-------------------------------------------------------------------------------
FourthRankTensor1d = PYB11TemplateClass(FourthRankTensor,
template_parameters = ("1"),
cppname = "Dim<1>::FourthRankTensor",
pyname = "FourthRankTensor1d")
FourthRankTensor2d = PYB11TemplateClass(FourthRankTensor,
template_parameters = ("2"),
cppname = "Dim<2>::FourthRankTensor",
pyname = "FourthRankTensor2d")
FourthRankTensor3d = PYB11TemplateClass(FourthRankTensor,
template_parameters = ("3"),
cppname = "Dim<3>::FourthRankTensor",
pyname = "FourthRankTensor3d")
```
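A hypothetical usage sketch of the bindings generated above; the import path is an assumption, since the compiled Spheral package layout is not shown in this document:
```python
# Hypothetical usage of the generated FourthRankTensor bindings; the import
# path is an assumption for this sketch.
from SpheralCompiledPackages import FourthRankTensor2d
t = FourthRankTensor2d()                         # default constructor
t(0, 1, 1, 0, 3.5)                               # element assignment via the __call__ overload
assert t(0, 1, 1, 0) == 3.5                      # element access via __call__
assert len(t) == FourthRankTensor2d.numElements  # __len__ reports the element count
total = sum(abs(x) for x in t)                   # __iter__ visits every stored element
```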
#### File: Pybind11Wraps/Geometry/GeometryMOD.py
```python
from PYB11Generator import *
import types
# Forcibly override the common preamble
PYB11preamble = ""
# Define some useful type collections we're going to be wrapping in this module.
geomtypes = ["Vector", "Tensor", "SymTensor", "ThirdRankTensor", "FourthRankTensor", "FifthRankTensor", "FacetedVolume"]
PYB11namespaces = ["Spheral", "PolyClipper"]
# for ndim in (1, 2, 3):
# preamble += "typedef GeomPlane<Dim<%i>> Plane%id;\n" % (ndim, ndim)
# for gtype in geomtypes:
# preamble += "typedef Dim<%i>::%s %s%id;\n" % (ndim, gtype, gtype, ndim)
# Include files
PYB11includes = ['"Geometry/Dimension.hh"',
'"Geometry/GeomVector.hh"',
'"Geometry/Geom3Vector.hh"',
'"Geometry/GeomTensor.hh"',
'"Geometry/GeomSymmetricTensor.hh"',
'"Geometry/GeomThirdRankTensor.hh"',
'"Geometry/GeomFourthRankTensor.hh"',
'"Geometry/GeomFifthRankTensor.hh"',
'"Geometry/EigenStruct.hh"',
'"Geometry/computeEigenValues.hh"',
'"Geometry/GeomPolygon.hh"',
'"Geometry/GeomPolyhedron.hh"',
'"Geometry/GeomFacet2d.hh"',
'"Geometry/GeomFacet3d.hh"',
'"Geometry/invertRankNTensor.hh"',
'"Geometry/innerProduct.hh"',
'"Geometry/outerProduct.hh"',
'"Geometry/innerDoubleProduct.hh"',
'"Geometry/aggregateFacetedVolumes.hh"',
'"Geometry/CellFaceFlag.hh"',
'"Geometry/PolyClipperUtilities.hh"',
'"Geometry/GeometryRegistrar.hh"',
'"Field/Field.hh"',
'"Utilities/DataTypeTraits.hh"',
'<vector>',
'<sstream>']
# STL containers
for element in geomtypes:
for ndim in (1, 2, 3):
exec('''
vector_of_%(mangle)s = PYB11_bind_vector("%(element)s", opaque=True, local=False)
vector_of_vector_of_%(mangle)s = PYB11_bind_vector("std::vector<%(element)s>", opaque=True, local=False)
''' % {"element": "Dim<" + str(ndim) + ">::" + element,
"mangle" : element + str(ndim) + "d"})
vector_of_Facet2d = PYB11_bind_vector("GeomFacet2d", opaque=True, local=False)
vector_of_Facet3d = PYB11_bind_vector("GeomFacet3d", opaque=True, local=False)
vector_of_Plane1d = PYB11_bind_vector("GeomPlane<Dim<1>>", opaque=True, local=False)
vector_of_Plane2d = PYB11_bind_vector("GeomPlane<Dim<2>>", opaque=True, local=False)
vector_of_Plane3d = PYB11_bind_vector("GeomPlane<Dim<3>>", opaque=True, local=False)
vector_of_CellFaceFlag = PYB11_bind_vector("CellFaceFlag", opaque=True, local=False)
# Get the objects wrapped in other files.
from Vector import Vector1d, Vector2d, Vector3d
from Tensor import Tensor1d, Tensor2d, Tensor3d
from SymTensor import SymTensor1d, SymTensor2d, SymTensor3d
from ThirdRankTensor import ThirdRankTensor1d, ThirdRankTensor2d, ThirdRankTensor3d
from FourthRankTensor import FourthRankTensor1d, FourthRankTensor2d, FourthRankTensor3d
from FifthRankTensor import FifthRankTensor1d, FifthRankTensor2d, FifthRankTensor3d
from EigenStruct import EigenStruct1d, EigenStruct2d, EigenStruct3d
from Plane import Plane1d, Plane2d, Plane3d
from Box1d import *
from Polygon import *
from Polyhedron import *
from Facet2d import *
from Facet3d import *
from CellFaceFlag import *
from GeometryRegistrar import *
#-------------------------------------------------------------------------------
# Enums
#-------------------------------------------------------------------------------
CoordinateType = PYB11enum(("Cartesian", "Spherical", "RZ"), export_values=True,
doc="The coorindate system types")
#-------------------------------------------------------------------------------
# Spheral PolyClipper bindings (using Spheral Vectors)
#-------------------------------------------------------------------------------
# Define the PolyClipper Polygon and Polyhedron
PolyClipperPolygon = PYB11_bind_vector("PolyClipper::Vertex2d<Spheral::GeomVectorAdapter<2>>", opaque=True, local=False)
PolyClipperPolyhedron = PYB11_bind_vector("PolyClipper::Vertex3d<Spheral::GeomVectorAdapter<3>>", opaque=True, local=False)
from PolyClipperVertex2d import *
from PolyClipperVertex3d import *
from PolyClipperPlane import *
PolyClipperPlane2d = PYB11TemplateClass(Plane, template_parameters="GeomVectorAdapter<2>")
PolyClipperPlane3d = PYB11TemplateClass(Plane, template_parameters="GeomVectorAdapter<3>")
# Polygon methods.
@PYB11namespace("PolyClipper")
def initializePolygon(poly = "PolyClipperPolygon&",
positions = "const std::vector<Dim<2>::Vector>&",
neighbors = "const std::vector<std::vector<int>>&"):
"Initialize a PolyClipper::Polygon from vertex positions and vertex neighbors."
return "void"
@PYB11namespace("PolyClipper")
@PYB11cppname("polygon2string<GeomVectorAdapter<2>>")
def polygon2string(poly = "const PolyClipperPolygon&"):
"Return a formatted string representation for a PolyClipper::Polygon."
return "std::string"
@PYB11implementation("""[](const PolyClipperPolygon& self) {
double zerothMoment;
Dim<2>::Vector firstMoment;
moments(zerothMoment, firstMoment, self);
return py::make_tuple(zerothMoment, firstMoment);
}""")
@PYB11namespace("PolyClipper")
@PYB11pycppname("moments")
def momentsPolygon(poly = "const PolyClipperPolygon&"):
"Compute the zeroth and first moment of a PolyClipper::Polygon."
return "py::tuple"
@PYB11namespace("PolyClipper")
def clipPolygon(poly = "PolyClipperPolygon&",
planes = "const std::vector<PolyClipperPlane2d>&"):
"Clip a PolyClipper::Polygon with a collection of planes."
return "void"
@PYB11namespace("PolyClipper")
@PYB11pycppname("collapseDegenerates")
def collapseDegeneratesPolygon(poly = "PolyClipperPolygon&",
tol = "const double"):
"Collapse edges in a PolyClipper::Polygon below the given tolerance."
return "void"
@PYB11namespace("PolyClipper")
def extractFaces(poly = "const PolyClipperPolygon&"):
"Compute the faces (as pairs of vertex indices) for the Polygon"
return "std::vector<std::vector<int>>"
@PYB11namespace("PolyClipper")
def commonFaceClips(poly = "const PolyClipperPolygon&",
faces = "const std::vector<std::vector<int>>&"):
"Find the common clipping planes for each face"
return "std::vector<std::set<int>>"
@PYB11namespace("PolyClipper")
def splitIntoTriangles(poly = "const PolyClipperPolygon&",
tol = ("const double", "0.0")):
"""Split a PolyClipper::Polygon into triangles.
The result is returned as a vector<vector<int>>, where each inner vector is a triple of
ints representing vertex indices in the input Polygon."""
return "std::vector<std::vector<int>>"
#-------------------------------------------------------------------------------
# Polyhedron methods.
#-------------------------------------------------------------------------------
@PYB11namespace("PolyClipper")
def initializePolyhedron(poly = "PolyClipperPolyhedron&",
positions = "const std::vector<Dim<3>::Vector>&",
neighbors = "const std::vector<std::vector<int>>&"):
"Initialize a PolyClipper::Polyhedron from vertex positions and vertex neighbors."
return "void"
@PYB11namespace("PolyClipper")
@PYB11cppname("polyhedron2string<GeomVectorAdapter<3>>")
def polyhedron2string(poly = "const PolyClipperPolyhedron&"):
"Return a formatted string representation for a PolyClipper::Polyhedron."
return "std::string"
@PYB11implementation("""[](const PolyClipperPolyhedron& self) {
double zerothMoment;
Dim<3>::Vector firstMoment;
moments(zerothMoment, firstMoment, self);
return py::make_tuple(zerothMoment, firstMoment);
}""")
@PYB11namespace("PolyClipper")
@PYB11pycppname("moments")
def momentsPolyhedron(poly = "const PolyClipperPolyhedron&"):
"Compute the zeroth and first moment of a PolyClipper::Polyhedron."
return "py::tuple"
@PYB11namespace("PolyClipper")
def clipPolyhedron(poly = "PolyClipperPolyhedron&",
planes = "const std::vector<PolyClipperPlane3d>&"):
"Clip a PolyClipper::Polyhedron with a collection of planes."
return "void"
@PYB11namespace("PolyClipper")
@PYB11pycppname("collapseDegenerates")
def collapseDegeneratesPolyhedron(poly = "PolyClipperPolyhedron&",
tol = "const double"):
"Collapse edges in a PolyClipper::Polyhedron below the given tolerance."
return "void"
@PYB11namespace("PolyClipper")
@PYB11pycppname("extractFaces")
def extractFacesPolyhedron(poly = "const PolyClipperPolyhedron&"):
"Compute the faces (as pairs of vertex indices) for the Polyhedron"
return "std::vector<std::vector<int>>"
@PYB11namespace("PolyClipper")
@PYB11pycppname("commonFaceClips")
def commonFaceClipsPolyhedron(poly = "const PolyClipperPolyhedron&",
faces = "const std::vector<std::vector<int>>&"):
"Find the common clipping planes for each face"
return "std::vector<std::set<int>>"
@PYB11namespace("PolyClipper")
def splitIntoTetrahedra(poly = "const PolyClipperPolyhedron&",
tol = ("const double", "0.0")):
"""Split a PolyClipper::Polyhedron into tetrahedra.
The result is returned as a vector<vector<int>>, where each inner vector is a set of four
ints representing vertex indices in the input Polyhedron."""
return "std::vector<std::vector<int>>"
#-------------------------------------------------------------------------------
# Vector standalone functions
#-------------------------------------------------------------------------------
@PYB11template("nDim")
def elementWiseMin(lhs = "const Dim<%(nDim)s>::Vector&",
rhs = "const Dim<%(nDim)s>::Vector&"):
"Find the coordinate by coordinate minimum of two Vectors."
return "Dim<%(nDim)s>::Vector"
@PYB11template("nDim")
def elementWiseMax(lhs = "const Dim<%(nDim)s>::Vector&",
rhs = "const Dim<%(nDim)s>::Vector&"):
"Find the coordinate by coordinate maximum of two Vectors."
return "Dim<%(nDim)s>::Vector"
elementWiseMin1 = PYB11TemplateFunction(elementWiseMin, template_parameters="1", pyname="elementWiseMin")
elementWiseMin2 = PYB11TemplateFunction(elementWiseMin, template_parameters="2", pyname="elementWiseMin")
elementWiseMin3 = PYB11TemplateFunction(elementWiseMin, template_parameters="3", pyname="elementWiseMin")
elementWiseMax1 = PYB11TemplateFunction(elementWiseMax, template_parameters="1", pyname="elementWiseMax")
elementWiseMax2 = PYB11TemplateFunction(elementWiseMax, template_parameters="2", pyname="elementWiseMax")
elementWiseMax3 = PYB11TemplateFunction(elementWiseMax, template_parameters="3", pyname="elementWiseMax")
#-------------------------------------------------------------------------------
# invertRankNTensor template
#-------------------------------------------------------------------------------
@PYB11template("TensorType")
def invertRankNTensor(tensor = "const %(TensorType)s&"):
"Compute the inverse of a tensor."
return "%(TensorType)s"
invertRankNTensor1 = PYB11TemplateFunction(invertRankNTensor,
template_parameters = "Dim<1>::Tensor",
pyname = "invertRankNTensor")
invertRankNTensor2 = PYB11TemplateFunction(invertRankNTensor,
template_parameters = "Dim<1>::SymTensor",
pyname = "invertRankNTensor")
invertRankNTensor3 = PYB11TemplateFunction(invertRankNTensor,
template_parameters = "Dim<1>::FourthRankTensor",
pyname = "invertRankNTensor")
#-------------------------------------------------------------------------------
# computeEigenValues
#-------------------------------------------------------------------------------
@PYB11template("Dim")
def computeEigenValues(field = "const Field<%(Dim)s, %(Dim)s::SymTensor>&",
eigenValues = "Field<%(Dim)s, %(Dim)s::Vector>&",
eigenVectors = "Field<%(Dim)s, %(Dim)s::Tensor>&"):
"Compute the eigenvalues for a field of symmetric tensors."
return "void"
computeEigenValues1 = PYB11TemplateFunction(computeEigenValues,
template_parameters = "Dim<1>",
pyname = "computeEigenValues")
computeEigenValues2 = PYB11TemplateFunction(computeEigenValues,
template_parameters = "Dim<2>",
pyname = "computeEigenValues")
computeEigenValues3 = PYB11TemplateFunction(computeEigenValues,
template_parameters = "Dim<3>",
pyname = "computeEigenValues")
#-------------------------------------------------------------------------------
# PolyClipper utility methods
#-------------------------------------------------------------------------------
@PYB11implementation('''[](const Dim<2>::FacetedVolume& Spheral_polygon) -> PolyClipperPolygon {
PolyClipperPolygon polygon;
convertToPolyClipper(polygon, Spheral_polygon);
return polygon;
}''')
def convertToPolyClipper(Spheral_polygon = "const Dim<2>::FacetedVolume&"):
"Convert a Spheral::Polygon --> PolyClipper::Polygon"
return "PolyClipperPolygon"
@PYB11implementation('''[](const PolyClipperPolygon& polygon) -> py::tuple {
Dim<2>::FacetedVolume Spheral_polygon;
auto clips = convertFromPolyClipper(Spheral_polygon, polygon);
return py::make_tuple(Spheral_polygon, clips);
}''')
def convertFromPolyClipper(polygon = "const PolyClipperPolygon&"):
"""Convert a PolyClipper::Polygon --> Spheral::Polygon
Returns the set of planes responsible for clipping each Vertex"""
return "py::tuple"
@PYB11implementation('''[](const Dim<3>::FacetedVolume& Spheral_polyhedron) -> PolyClipperPolyhedron {
PolyClipperPolyhedron polyhedron;
convertToPolyClipper(polyhedron, Spheral_polyhedron);
return polyhedron;
}''')
@PYB11pycppname("convertToPolyClipper")
def convertToPolyClipper3d(Spheral_polyhedron = "const Dim<3>::FacetedVolume&"):
"Convert a Spheral::Polyhedron --> PolyClipper::Polyhedron"
return "PolyClipperPolyhedron"
@PYB11implementation('''[](const PolyClipperPolyhedron& polyhedron) -> py::tuple {
Dim<3>::FacetedVolume Spheral_polyhedron;
auto clips = convertFromPolyClipper(Spheral_polyhedron, polyhedron);
return py::make_tuple(Spheral_polyhedron, clips);
}''')
@PYB11pycppname("convertFromPolyClipper")
def convertFromPolyClipper3d(polyhedron = "const PolyClipperPolyhedron&"):
"""Convert a PolyClipper::Polyhedron --> Spheral::Polyhedron
Returns the set of planes responsible for clipping each Vertex"""
return "py::tuple"
#-------------------------------------------------------------------------------
# Inner product (with a double)
#-------------------------------------------------------------------------------
@PYB11template("ValueType")
def innerProductScalar(A = "const double&",
B = "const %(ValueType)s&"):
"Inner product with a scalar."
return "%(ValueType)s"
@PYB11template("ValueType")
def innerProductScalarR(A = "const %(ValueType)s&",
B = "const double&"):
"Inner product with a scalar."
return "%(ValueType)s"
for VT in ("Vector", "Tensor", "SymTensor", "ThirdRankTensor", "FourthRankTensor", "FifthRankTensor"):
for ndim in (1, 2, 3):
exec("""
innerProduct%(VT)sScalar%(ndim)id = PYB11TemplateFunction(innerProductScalar,
template_parameters = "Dim<%(ndim)i>::%(VT)s",
pyname = "innerProduct",
cppname = "innerProduct<Dim<%(ndim)i>::%(VT)s>")
innerProductScalar%(VT)s%(ndim)id = PYB11TemplateFunction(innerProductScalarR,
template_parameters = "Dim<%(ndim)i>::%(VT)s",
pyname = "innerProduct",
cppname = "innerProduct<Dim<%(ndim)i>::%(VT)s>")
""" % {"VT" : VT,
"ndim" : ndim})
#-------------------------------------------------------------------------------
# General inner products
#-------------------------------------------------------------------------------
@PYB11template("AType", "BType", "ReturnType")
def innerProduct(A = "const %(AType)s&",
B = "const %(BType)s&"):
"Inner product (%(AType)s . %(BType)s -> %(ReturnType)s"
return "%(ReturnType)s"
# Map inner product types to result
IPRT = {("Vector", "Vector") : "double",
("Vector", "Tensor") : "Vector",
("Vector", "SymTensor") : "Vector",
("Vector", "ThirdRankTensor") : "Tensor",
("Vector", "FourthRankTensor") : "ThirdRankTensor",
("Tensor", "Tensor") : "Tensor",
("Tensor", "SymTensor") : "Tensor",
("SymTensor", "Tensor") : "Tensor",
("Tensor", "SymTensor") : "Tensor",
("SymTensor", "SymTensor") : "Tensor",
("Tensor", "ThirdRankTensor") : "ThirdRankTensor",
("Tensor", "FourthRankTensor") : "FourthRankTensor",
("SymTensor", "ThirdRankTensor") : "ThirdRankTensor",
("SymTensor", "FourthRankTensor") : "FourthRankTensor",
("ThirdRankTensor", "ThirdRankTensor") : "FourthRankTensor",
("ThirdRankTensor", "FourthRankTensor") : "FifthRankTensor",
}
for A, B in dict(IPRT):
IPRT[(B, A)] = IPRT[(A, B)]
for A, B in IPRT:
for ndim in (1, 2, 3):
exec("""
a = "Dim<%(ndim)i>::" + "%(A)s"
b = "Dim<%(ndim)i>::" + "%(B)s"
if "%(RT)s" == "double":
rt = "%(RT)s"
else:
rt = "Dim<%(ndim)i>::" + "%(RT)s"
innerProduct%(A)s%(B)s%(ndim)id = PYB11TemplateFunction(innerProduct,
template_parameters = (a, b, rt),
pyname = "innerProduct",
cppname = "innerProduct<Dim<%(ndim)i>>")
""" % {"A" : A,
"B" : B,
"RT" : IPRT[(A, B)],
"ndim" : ndim})
#-------------------------------------------------------------------------------
# General outer products
#-------------------------------------------------------------------------------
@PYB11template("AType", "BType", "ReturnType")
def outerProduct(A = "const %(AType)s&",
B = "const %(BType)s&"):
"Outer product (%(AType)s x %(BType)s -> %(ReturnType)s"
return "%(ReturnType)s"
# Map outer product types to result
OPRT = {("Scalar", "Scalar") : "Scalar",
("Scalar", "Vector") : "Vector",
("Scalar", "Tensor") : "Tensor",
("Scalar", "SymTensor") : "SymTensor",
("Scalar", "ThirdRankTensor") : "ThirdRankTensor",
("Scalar", "FourthRankTensor") : "FourthRankTensor",
("Scalar", "FifthRankTensor") : "FifthRankTensor",
("Vector", "Scalar") : "Vector",
("Tensor", "Scalar") : "Tensor",
("SymTensor", "Scalar") : "SymTensor",
("ThirdRankTensor", "Scalar") : "ThirdRankTensor",
("FourthRankTensor", "Scalar") : "FourthRankTensor",
("FifthRankTensor", "Scalar") : "FifthRankTensor",
("Vector", "Vector") : "Tensor",
("Vector", "Tensor") : "ThirdRankTensor",
("Vector", "SymTensor") : "ThirdRankTensor",
("Vector", "ThirdRankTensor") : "FourthRankTensor",
("Vector", "FourthRankTensor") : "FifthRankTensor",
("Tensor", "Tensor") : "FourthRankTensor",
("Tensor", "SymTensor") : "FourthRankTensor",
("Tensor", "ThirdRankTensor") : "FifthRankTensor",
("SymTensor", "ThirdRankTensor") : "FifthRankTensor",
}
for A, B in dict(OPRT):
OPRT[(B, A)] = OPRT[(A, B)]
for A, B in OPRT:
for ndim in (1, 2, 3):
exec("""
a = "Dim<%(ndim)i>::" + "%(A)s"
b = "Dim<%(ndim)i>::" + "%(B)s"
if "%(RT)s" == "double":
rt = "%(RT)s"
else:
rt = "Dim<%(ndim)i>::" + "%(RT)s"
outerProduct%(A)s%(B)s%(ndim)id = PYB11TemplateFunction(outerProduct,
template_parameters = (a, b, rt),
pyname = "outerProduct",
cppname = "outerProduct<Dim<%(ndim)i>>")
""" % {"A" : A,
"B" : B,
"RT" : OPRT[(A, B)],
"ndim" : ndim})
#-------------------------------------------------------------------------------
# Inner double product
#-------------------------------------------------------------------------------
@PYB11template("AType", "BType", "ReturnType")
def innerDoubleProduct(A = "const %(AType)s&",
B = "const %(BType)s&"):
"Inner double product (%(AType)s : %(BType)s -> %(ReturnType)s"
return "%(ReturnType)s"
# Map product types to result
IDPRT = {("Tensor", "Tensor") : "double",
("Tensor", "SymTensor") : "double",
("Tensor", "ThirdRankTensor") : "Vector",
("Tensor", "FourthRankTensor") : "Tensor",
("Tensor", "FifthRankTensor") : "ThirdRankTensor",
("SymTensor", "SymTensor") : "double",
("SymTensor", "ThirdRankTensor") : "Vector",
("SymTensor", "FourthRankTensor") : "Tensor",
("SymTensor", "FifthRankTensor") : "ThirdRankTensor",
("ThirdRankTensor", "ThirdRankTensor") : "Tensor",
("ThirdRankTensor", "FourthRankTensor"): "ThirdRankTensor",
("ThirdRankTensor", "FifthRankTensor") : "FourthRankTensor",
("FourthRankTensor", "FourthRankTensor") : "FourthRankTensor",
("FourthRankTensor", "FifthRankTensor") : "FifthRankTensor",
}
for A, B in dict(IDPRT):
IDPRT[(B, A)] = IDPRT[(A, B)]
for A, B in IDPRT:
for ndim in (1, 2, 3):
exec("""
a = "Dim<%(ndim)i>::" + "%(A)s"
b = "Dim<%(ndim)i>::" + "%(B)s"
if "%(RT)s" == "double":
rt = "%(RT)s"
else:
rt = "Dim<%(ndim)i>::" + "%(RT)s"
innerDoubleProduct%(A)s%(B)s%(ndim)id = PYB11TemplateFunction(innerDoubleProduct,
template_parameters = (a, b, rt),
pyname = "innerDoubleProduct",
cppname = "innerDoubleProduct<Dim<%(ndim)i>>")
""" % {"A" : A,
"B" : B,
"RT" : IDPRT[(A, B)],
"ndim" : ndim})
```
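Each pass of the exec loops above stamps out an ordinary PYB11TemplateFunction declaration. For reference, the inner-product iteration with A="Vector", B="Vector", ndim=2 expands to the equivalent of this sketch:
```python
# Expansion sketch of one exec iteration (A="Vector", B="Vector", ndim=2);
# the loop above already generates this binding, shown here for reference only.
innerProductVectorVector2d = PYB11TemplateFunction(innerProduct,
                                                   template_parameters = ("Dim<2>::Vector",
                                                                          "Dim<2>::Vector",
                                                                          "double"),
                                                   pyname = "innerProduct",
                                                   cppname = "innerProduct<Dim<2>>")
```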
#### File: Pybind11Wraps/Geometry/GeometryRegistrar.py
```python
from PYB11Generator import *
#-------------------------------------------------------------------------------
# GeometryRegistrar
#-------------------------------------------------------------------------------
@PYB11singleton
class GeometryRegistrar:
# The instance attribute. We expose this as a property of the class.
@PYB11static
@PYB11returnpolicy("reference")
def instance(self):
return "GeometryRegistrar&"
# The coordinate system
@PYB11static
@PYB11pycppname("coords")
@PYB11returnpolicy("reference")
#@PYB11ignore
def get_coords(self):
return "CoordinateType"
@PYB11static
@PYB11pycppname("coords")
#@PYB11ignore
def set_coords(self,
x = "const CoordinateType"):
return "void"
#coords = property(get_coords, set_coords, doc="The coordinate system")
```
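Because the getter and setter above share the python name "coords" and are both static, the generated class exposes an overloaded static method. A hypothetical usage sketch, with the import path assumed:
```python
# Hypothetical usage; the import path is an assumption for this sketch.
from SpheralCompiledPackages import GeometryRegistrar, CoordinateType
reg = GeometryRegistrar.instance()             # static singleton accessor (by reference)
GeometryRegistrar.coords(CoordinateType.RZ)    # static "coords" setter overload
assert GeometryRegistrar.coords() == CoordinateType.RZ
```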
#### File: Pybind11Wraps/Geometry/Polygon.py
```python
from PYB11Generator import *
@PYB11cppname("GeomPolygon")
class Polygon:
PYB11typedefs = """
using Polygon = GeomPolygon;
using Vector = GeomPolygon::Vector;
using Tensor = GeomPolygon::Tensor;
using Facet = GeomPolygon::Facet;
"""
#...........................................................................
# Constructors
def pyinit(self):
"Default constructor"
def pyinit0(self,
rhs = "const Polygon&"):
"Copy constructor"
def pyinit1(self,
points = "const std::vector<Vector>&"):
"""Note this constructor constructs the convex hull of the given points,
meaning that the full set of points passed in may not appear in the vertices."""
def pyinit2(self,
points = "const std::vector<Vector>&",
facetIndices = "const std::vector<std::vector<unsigned> >&"):
"Construct with explicit vertices and facets"
@PYB11implementation("[](py::list& points) { std::vector<Vector> vpoints; for (auto p: points) vpoints.push_back(p.cast<Vector>()); return new GeomPolygon(vpoints); }")
def pyinit3(self,
points = "py::list"):
"Construct as the convex hull of a python list of points"
#...........................................................................
# Methods
@PYB11const
def contains(self,
point = "const Vector&",
countBoundary = ("const bool", "true"),
tol = ("const double", "1.0e-8")):
"Test if the given point is internal to the polygon."
return "bool"
@PYB11const
def convexContains(self,
point = "const Vector&",
countBoundary = ("const bool", "true"),
tol = ("const double", "1.0e-8")):
"Test if the given point is internal to the polygon (assumes convexity)."
return "bool"
@PYB11const
def intersect(self,
rhs = "const Polygon&"):
"Test if we intersect another polygon."
return "bool"
@PYB11const
def convexIntersect(self,
rhs = "const Polygon&"):
"Test if we intersect another polygon (assumes convexity)"
return "bool"
@PYB11const
@PYB11pycppname("intersect")
def intersect1(self,
rhs = "const std::pair<Vector, Vector>&"):
"Test if we intersect a box represented by a min/max pair of coordinates."
return "bool"
@PYB11const
@PYB11pycppname("intersect")
def intersect2(self,
s0 = "const Vector&",
s1 = "const Vector&"):
"Test if we intersect a line segment (interior counts as intersection)."
return "bool"
@PYB11const
@PYB11implementation("[](const Polygon& self, const Vector& s0, const Vector& s1) { std::vector<unsigned> facetIDs; std::vector<Vector> intersections; self.intersections(s0, s1, facetIDs, intersections); return py::make_tuple(facetIDs, intersections); }")
def intersections(self,
s0 = "const Vector&",
s1 = "const Vector&"):
"Return the intersections of this polygon with a line segment denoted by it's end points."
return "py::tuple"
@PYB11const
def edges(self):
"Get the edges as integer (node) pairs."
return "std::vector<std::pair<unsigned, unsigned> >"
def reconstruct(self,
vertices = "const std::vector<Vector>&",
facetVertices = "const std::vector<std::vector<unsigned> >&"):
"""Reconstruct the internal data given a set of verticies and the vertex
indices that define the facets."""
return "void"
@PYB11const
def closestFacet(self, p = "const Vector&"):
"Find the facet closest to the given point."
return "unsigned"
@PYB11const
def distance(self, p="const Vector&"):
"Compute the minimum distance to a point."
return "double"
@PYB11const
def closestPoint(self, p="const Vector&"):
"Find the point in the polygon closest to the given point."
return "Vector"
@PYB11const
def convex(self, tol = "double"):
"Test if the polygon is convex"
return "bool"
def setBoundingBox(self):
"Set the internal bounding box"
return "void"
@PYB11const
def facetArea(self, facetID="const unsigned"):
return "double"
@PYB11const
def facetAreaNormal(self, facetID="const unsigned"):
return "Vector"
@PYB11const
def facetSubVolume(self, facetID="const unsigned"):
"Decompose the polygon into triangles for each facet"
return "Polygon"
def transform(self, t="const Tensor&"):
"Apply a general transformation tensor"
return "Polygon&"
#...........................................................................
# Operators
def __iadd__(self, rhs="Vector()"):
return
def __isub__(self, rhs="Vector()"):
return
def __add__(self, rhs="Vector()"):
return
def __sub__(self, rhs="Vector()"):
return
def __imul__(self, rhs="double()"):
return
def __idiv__(self, rhs="double()"):
return
def __mul__(self, rhs="double()"):
return
def __div__(self, rhs="double()"):
return
def __eq__(self):
return
def __ne__(self):
return
#...........................................................................
# Properties
centroid = PYB11property("Vector")
vertices = PYB11property("const std::vector<Vector>&", returnpolicy="reference_internal")
facets = PYB11property("const std::vector<Facet>&", returnpolicy="reference_internal")
facetVertices = PYB11property("std::vector<std::vector<unsigned> >",
doc="Spit out a vector<vector<unsigned> > that encodes the facets.")
vertexUnitNorms = PYB11property("const std::vector<Vector>&", returnpolicy="reference_internal")
vertexFacetConnectivity = PYB11property("const std::vector<std::vector<unsigned> >&", returnpolicy="reference_internal")
facetFacetConnectivity = PYB11property("const std::vector<std::vector<unsigned> >&", returnpolicy="reference_internal")
xmin = PYB11property("const Vector&", returnpolicy="reference_internal")
xmax = PYB11property("const Vector&", returnpolicy="reference_internal")
volume = PYB11property("double")
```
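A hypothetical usage sketch of the Polygon bindings above (import path assumed); note that pyinit1 and pyinit3 construct the convex hull of the input points:
```python
# Hypothetical usage; the import path is an assumption for this sketch.
from SpheralCompiledPackages import Polygon, Vector2d
square = Polygon([Vector2d(0.0, 0.0), Vector2d(1.0, 0.0),
                  Vector2d(1.0, 1.0), Vector2d(0.0, 1.0)])  # convex-hull constructor (pyinit3)
assert square.contains(Vector2d(0.5, 0.5))
facetIDs, points = square.intersections(Vector2d(-1.0, 0.5), Vector2d(2.0, 0.5))
assert len(points) == 2                                     # the segment crosses two facets
```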
#### File: Pybind11Wraps/GSPH/LimiterBaseAbstractMethods.py
```python
from PYB11Generator import *
@PYB11ignore
class LimiterBaseAbstractMethods:
@PYB11const
def fluxLimiter(self,
x = "const Scalar"):
"flux limiter."
return "Scalar"
```
#### File: Pybind11Wraps/Integrator/SynchronousRK2Integrator.py
```python
from PYB11Generator import *
from IntegratorAbstractMethods import *
from Integrator import *
@PYB11template("Dimension")
@PYB11cppname("SynchronousRK2")
class SynchronousRK2Integrator(Integrator):
"Second-order in time explicit Runge-Kutta time integration scheme"
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename %(Dimension)s::ThirdRankTensor ThirdRankTensor;
"""
#...........................................................................
# Constructors
def pyinit(self):
"Construct an itegrator"
def pyinit1(self, dataBase = "DataBase<%(Dimension)s>&"):
"Construct an integrator with a DataBase"
def pyinit2(self,
dataBase = "DataBase<%(Dimension)s>&",
physicsPackages = "const std::vector<Physics<%(Dimension)s>*>&"):
"Construct an integrator with a DataBase and physics packages"
#...........................................................................
# Virtual methods
@PYB11virtual
@PYB11pycppname("step")
def step1(self, maxTime="Scalar"):
"Take a step"
return "bool"
@PYB11virtual
@PYB11const
def label(self):
return "std::string"
#-------------------------------------------------------------------------------
# Inject other interfaces
#-------------------------------------------------------------------------------
PYB11inject(IntegratorAbstractMethods, SynchronousRK2Integrator, pure_virtual=False, virtual=True)
```
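Like the other templated classes in this repo, `SynchronousRK2Integrator` only becomes concrete when instantiated per dimension. A sketch of that pattern (the actual Integrator MOD file is not shown here; this mirrors the `exec` idiom used in NodeListMOD.py below):
```python
# Sketch of the per-dimension instantiation idiom used by the MOD files.
from PYB11Generator import *
from SynchronousRK2Integrator import *

for ndim in (1, 2, 3):
    exec('''
SynchronousRK2Integrator%(ndim)id = PYB11TemplateClass(SynchronousRK2Integrator,
                                                       template_parameters="Dim<%(ndim)i>")
''' % {"ndim": ndim})
```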
#### File: Pybind11Wraps/Material/PolytropicEquationOfState.py
```python
from PYB11Generator import *
from EquationOfState import *
from EOSAbstractMethods import *
@PYB11template("Dimension")
class PolytropicEquationOfState(EquationOfState):
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef Field<%(Dimension)s, Scalar> ScalarField;
"""
#...........................................................................
# Constructors
def pyinit(self,
K = "const double",
index = "const double",
mu = "const double",
constants = "const PhysicalConstants&",
minimumPressure = ("const double", "-std::numeric_limits<double>::max()"),
maximumPressure = ("const double", "std::numeric_limits<double>::max()"),
minPressureType = ("const MaterialPressureMinType", "MaterialPressureMinType::PressureFloor")):
"Provides the polytropic EOS: P = K rho^gamma; gamma = (index + 1)/index"
#...........................................................................
# Methods
@PYB11const
def pressure(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11const
def temperature(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11const
def specificThermalEnergy(self,
massDensity = "const Scalar",
temperature = "const Scalar"):
return "Scalar"
@PYB11const
def specificHeat(self,
massDensity = "const Scalar",
temperature = "const Scalar"):
return "Scalar"
@PYB11const
def soundSpeed(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
# @PYB11const
# @PYB11cppname("gamma")
# def gamma(self,
# massDensity = "const Scalar",
# specificThermalEnergy = "const Scalar"):
# return "Scalar"
@PYB11const
def bulkModulus(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11const
def entropy(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11virtual
@PYB11const
def molecularWeight(self):
"Optionally provide a molecular weight for an equation of state"
return "Scalar"
#...........................................................................
# Properties
polytropicConstant = PYB11property("double", "polytropicConstant", doc="K: the polytropic constant in front")
polytropicIndex = PYB11property("double", "polytropicIndex", doc="polytropic index: gamma = (index + 1)/index")
gamma = PYB11property("double", "gamma", doc="gamma: ratio of specific heats")
mu = PYB11property("double", "molecularWeight", doc="mean molecular weight")
externalPressure = PYB11property("double", "externalPressure", "setExternalPressure", doc="Any external pressure (subtracted from the pressure calculation)")
#-------------------------------------------------------------------------------
# Add the virtual interface
#-------------------------------------------------------------------------------
PYB11inject(EOSAbstractMethods, PolytropicEquationOfState)
```
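The relation quoted in the constructor docstring is easy to sanity-check in plain Python. This is illustrative only; the real evaluation happens in the wrapped C++ methods operating on Fields:
```python
# Illustrative check of P = K rho^gamma with gamma = (index + 1)/index.
K, index = 1.0, 3.0
gamma = (index + 1.0)/index     # 4/3 for index = 3
def pressure(rho):
    return K*rho**gamma
print pressure(2.0)             # K * 2**(4/3) ~= 2.5198
```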
#### File: Pybind11Wraps/NodeList/NodeListMOD.py
```python
from PYB11Generator import *
from SpheralCommon import *
from spheralDimensions import *
dims = spheralDimensions()
#-------------------------------------------------------------------------------
# Includes
#-------------------------------------------------------------------------------
PYB11includes += ['"NodeList/NodeListRegistrar.hh"',
'"NodeList/NodeList.hh"',
'"NodeList/FluidNodeList.hh"',
'"NodeList/SolidNodeList.hh"',
'"NodeList/SmoothingScaleBase.hh"',
'"NodeList/FixedSmoothingScale.hh"',
'"NodeList/SPHSmoothingScale.hh"',
'"NodeList/ASPHSmoothingScale.hh"',
'"NodeList/generateVoidNodes.hh"',
'"NodeList/nthNodalMoment.hh"',
'"Material/EquationOfState.hh"',
'"SolidMaterial/StrengthModel.hh"',
'"Kernel/TableKernel.hh"',
'"Neighbor/ConnectivityMap.hh"',
'"Mesh/Mesh.hh"',
'"FileIO/FileIO.hh"']
#-------------------------------------------------------------------------------
# Namespaces
#-------------------------------------------------------------------------------
PYB11namespaces = ["Spheral"]
#-------------------------------------------------------------------------------
# preamble
#-------------------------------------------------------------------------------
for ndim in dims:
PYB11preamble += "typedef std::pair<NodeList<Dim<%(ndim)i>>*, std::string> pair_NodeList%(ndim)idptr_string;\n" % {"ndim": ndim}
#-------------------------------------------------------------------------------
# Enums
#-------------------------------------------------------------------------------
NodeType = PYB11enum(("InternalNode", "GhostNode"), export_values=True,
doc="The classifications of Spheral nodes.")
#-------------------------------------------------------------------------------
# Do our dimension dependent instantiations.
#-------------------------------------------------------------------------------
from NodeListRegistrar import NodeListRegistrar
from NodeList import NodeList
from FluidNodeList import FluidNodeList
from SolidNodeList import SolidNodeList
from SmoothingScaleBase import SmoothingScaleBase
from FixedSmoothingScale import FixedSmoothingScale
from SPHSmoothingScale import SPHSmoothingScale
from ASPHSmoothingScale import ASPHSmoothingScale
for ndim in dims:
exec('''
NodeListRegistrar%(ndim)id = PYB11TemplateClass(NodeListRegistrar, template_parameters="Dim<%(ndim)i>")
NodeList%(ndim)id = PYB11TemplateClass(NodeList, template_parameters="Dim<%(ndim)i>")
FluidNodeList%(ndim)id = PYB11TemplateClass(FluidNodeList, template_parameters="Dim<%(ndim)i>")
SolidNodeList%(ndim)id = PYB11TemplateClass(SolidNodeList, template_parameters="Dim<%(ndim)i>")
SmoothingScaleBase%(ndim)id = PYB11TemplateClass(SmoothingScaleBase, template_parameters="Dim<%(ndim)i>")
FixedSmoothingScale%(ndim)id = PYB11TemplateClass(FixedSmoothingScale, template_parameters="Dim<%(ndim)i>")
SPHSmoothingScale%(ndim)id = PYB11TemplateClass(SPHSmoothingScale, template_parameters="Dim<%(ndim)i>")
ASPHSmoothingScale%(ndim)id = PYB11TemplateClass(ASPHSmoothingScale, template_parameters="Dim<%(ndim)i>")
vector_of_NodeList%(ndim)id = PYB11_bind_vector("NodeList<Dim<%(ndim)i>>*", opaque=True, local=False)
vector_of_FluidNodeList%(ndim)id = PYB11_bind_vector("FluidNodeList<Dim<%(ndim)i>>*", opaque=True, local=False)
vector_of_SolidNodeList%(ndim)id = PYB11_bind_vector("SolidNodeList<Dim<%(ndim)i>>*", opaque=True, local=False)
vector_of_pair_NodeList%(ndim)id_string = PYB11_bind_vector("pair_NodeList%(ndim)idptr_string", opaque=True, local=False)
''' % {"ndim" : ndim})
#-------------------------------------------------------------------------------
# Functions
#-------------------------------------------------------------------------------
@PYB11template("Dimension")
def generateVoidNodes(generators = "const std::vector<typename %(Dimension)s::Vector>&",
Hs = "const std::vector<typename %(Dimension)s::SymTensor>&",
mesh = "const Mesh<%(Dimension)s>&",
xmin = "const typename %(Dimension)s::Vector&",
xmax = "const typename %(Dimension)s::Vector&",
numInternal = "const unsigned",
nPerh = "const double",
threshold = "const double",
voidNodes = "NodeList<%(Dimension)s>&"):
"""This algorithm tries to analyze how continuous a node distribution is, and
if it determines there is an edge to the distribution creates new void nodes
outside that surface.
We assume here that the caller has already created all the boundary ghost
nodes."""
return "void"
@PYB11template("Dimension", "moment")
@PYB11implementation("""[](const std::vector<NodeList<%(Dimension)s>*>& nodeLists,
const TableKernel<%(Dimension)s>& W,
const bool renormalize) -> FieldList<%(Dimension)s, typename MomentTraits<%(Dimension)s, %(moment)s>::Moment> {
return nthNodalMoment<%(Dimension)s, typename std::vector<NodeList<%(Dimension)s>*>::const_iterator, %(moment)s>
(nodeLists.begin(), nodeLists.end(), W, renormalize);
}""")
def nthNodalMoment(nodeLists = "const std::vector<NodeList<%(Dimension)s>*>&",
W = "const TableKernel<%(Dimension)s>&",
renormalize = "const bool"):
""" Compute the nth (with n=%(moment)s moment of the local nodal distribution in \\\eta space:
\\\sum_j (\\\eta_i)^n W_ij
-----------------------
\\\sum_j W_ij
"""
return "FieldList<%(Dimension)s, typename MomentTraits<%(Dimension)s, %(moment)s>::Moment>"
@PYB11template("Dimension")
@PYB11implementation("""[](const std::vector<NodeList<%(Dimension)s>*>& nodeLists,
const TableKernel<%(Dimension)s>& W,
const bool useGradientAsKernel,
FieldList<%(Dimension)s, typename %(Dimension)s::Scalar>& zerothMoment,
FieldList<%(Dimension)s, typename %(Dimension)s::Vector>& firstMoment) {
zerothAndFirstNodalMoments<%(Dimension)s, typename std::vector<NodeList<%(Dimension)s>*>::const_iterator>
(nodeLists.begin(), nodeLists.end(), W, useGradientAsKernel, zerothMoment, firstMoment);
}""")
def zerothAndFirstNodalMoments(nodeLists = "const std::vector<NodeList<%(Dimension)s>*>&",
W = "const TableKernel<%(Dimension)s>&",
                               useGradientAsKernel = "const bool",
zerothMoment = "FieldList<%(Dimension)s, typename %(Dimension)s::Scalar>&",
firstMoment = "FieldList<%(Dimension)s, typename %(Dimension)s::Vector>&"):
"Compute the non-normalized zeroth and normalized first moment in eta space -- calls nthNodalMoment."
return "void"
for ndim in dims:
exec('''
generateVoidNodes%(ndim)id = PYB11TemplateFunction(generateVoidNodes, template_parameters="Dim<%(ndim)i>", pyname="generateVoidNodes")
zerothNodalMoment%(ndim)id = PYB11TemplateFunction(nthNodalMoment, template_parameters=("Dim<%(ndim)i>", "0"), pyname="zerothNodalMoment")
firstNodalMoment%(ndim)id = PYB11TemplateFunction(nthNodalMoment, template_parameters=("Dim<%(ndim)i>", "1"), pyname="firstNodalMoment")
secondNodalMoment%(ndim)id = PYB11TemplateFunction(nthNodalMoment, template_parameters=("Dim<%(ndim)i>", "2"), pyname="secondNodalMoment")
zerothAndFirstNodalMoments%(ndim)id = PYB11TemplateFunction(zerothAndFirstNodalMoments, template_parameters="Dim<%(ndim)i>", pyname="zerothAndFirstNodalMoments")
''' % {"ndim" : ndim})
```
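Because each instantiation above is published under a shared pyname, the generated functions appear in Python as dimension-overloaded callables. A hedged sketch (the `nodes` NodeList and `WT` TableKernel are assumed to exist):
```python
# Hypothetical call of the generated moment functions.
from SpheralCompiledPackages import *

nodeLists = vector_of_NodeList2d()
nodeLists.append(nodes)                       # an existing 2d NodeList
m0 = zerothNodalMoment(nodeLists, WT, True)   # FieldList of Scalars
m1 = firstNodalMoment(nodeLists, WT, True)    # FieldList of Vectors
```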
#### File: Pybind11Wraps/RK/ReproducingKernelMethods.py
```python
from PYB11Generator import *
@PYB11template("Dimension")
class ReproducingKernelMethods:
"""Provides the reproducing kernel methods, analogous to the Kernel class for SPH
This is really just a convenient front-end for the methods in RKUtilities"""
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename Eigen::SparseMatrix<double> TransformationMatrix;
"""
def pyinit(self,
order = "const RKOrder"):
"Constructor"
@PYB11const
def transformationMatrix(self,
                         T = "const Tensor&",
                         needHessian = "const bool"):
"Compute the transformation matrix to apply to the RK coefficients for the given Tensor"
return "TransformationMatrix"
@PYB11const
def applyTransformation(self,
T = "const TransformationMatrix&",
corrections = "RKCoefficients<%(Dimension)s>&"):
"Apply the transformation T to the corrections"
return "void"
#..........................................................................
# Attributes
order = PYB11property(doc="order to which we are enforcing reproducibility")
gradCorrectionsSize = PYB11property(doc="The size of the RKCoefficients for corrections + grad")
hessCorrectionsSize = PYB11property(doc="The size of the RKCoefficients for corrections + grad + hessian")
```
#### File: Pybind11Wraps/SolidMaterial/PorousStrengthModel.py
```python
from PYB11Generator import *
from StrengthModel import *
from StrengthModelAbstractMethods import *
@PYB11template("Dimension")
@PYB11module("SpheralSolidMaterial")
class PorousStrengthModel(StrengthModel):
"""PorousStrengthModel
An implementation of strain-alpha porosity model described in
<NAME>, & <NAME>, 180, 514-527 (2006)
"A strain-based porosity model for use in hydrocode simulations of impacts
and implications for transient crater growth in porous targets"
This model assumes you will provide a solid EOS which will be modified.
The underlying actualy solid EOS should provide the reference density, which
will be treated here as the compacted true solid reference density.
Note this model introduces a new state variable, the distention (alpha), which
the pressure now depends on. This implies our usual definition of P(rho, eps)
now becomes P(rho, eps, alpha). Our EOS interface does not recognize this
this parameter, so we store alpha locally and only allow Field updates of the
pressure (forbidding the single value P lookup the EOS usually allows)."""
PYB11typedefs = """
using Scalar = typename %(Dimension)s::Scalar;
using SymTensor = typename %(Dimension)s::SymTensor;
using ScalarField = Field<%(Dimension)s, Scalar>;
"""
#...........................................................................
# Constructors
def pyinit(self,
solidStrength = "const StrengthModel<%(Dimension)s>&"):
"Construct with the strength model we're modifying"
#...........................................................................
# Virtual methods
@PYB11virtual
@PYB11const
def providesSoundSpeed(self):
return "bool"
@PYB11virtual
@PYB11const
def providesBulkModulus(self):
return "bool"
@PYB11virtual
@PYB11const
def soundSpeed(self,
soundSpeed = "Field<%(Dimension)s, Scalar>&",
density = "const Field<%(Dimension)s, Scalar>&",
specificThermalEnergy = "const Field<%(Dimension)s, Scalar>&",
pressure = "const Field<%(Dimension)s, Scalar>&",
fluidSoundSpeed = "const Field<%(Dimension)s, Scalar>&",
damage = "const Field<%(Dimension)s, SymTensor>&"):
return "void"
@PYB11virtual
@PYB11const
def bulkModulus(self,
bulkModulus = "Field<%(Dimension)s, Scalar>&",
massDensity = "const Field<%(Dimension)s, Scalar>&",
specificThermalEnergy = "const Field<%(Dimension)s, Scalar>&"):
return "void"
@PYB11virtual
@PYB11const
def meltSpecificEnergy(self,
meltSpecificEnergy = "Field<%(Dimension)s, Scalar>&",
density = "const Field<%(Dimension)s, Scalar>&",
                       specificThermalEnergy = "const Field<%(Dimension)s, Scalar>&"):
return "void"
@PYB11virtual
@PYB11const
def coldSpecificEnergy(self,
coldSpecificEnergy = "Field<%(Dimension)s, Scalar>&",
density = "const Field<%(Dimension)s, Scalar>&",
                       specificThermalEnergy = "const Field<%(Dimension)s, Scalar>&"):
return "void"
#...........................................................................
# Properties
solidStrength = PYB11property("const StrengthModel<%(Dimension)s>&", returnpolicy="reference_internal")
alpha = PYB11property("const Field<%(Dimension)s, Scalar>&", "alpha", "alpha", returnpolicy="reference_internal")
#-------------------------------------------------------------------------------
# Inject abstract interface
#-------------------------------------------------------------------------------
PYB11inject(StrengthModelAbstractMethods, PorousStrengthModel, virtual=True, pure_virtual=False)
```
#### File: src/Pybind11Wraps/SpheralPickle.py
```python
import copy_reg, pickle
from SpheralCompiledPackages import *
#-------------------------------------------------------------------------------
# Vector1d
#-------------------------------------------------------------------------------
def construct_Vector1d(x):
return Vector1d(x)
def reduce_Vector1d(obj):
return construct_Vector1d, (obj.x,)
copy_reg.pickle(type(Vector1d()), reduce_Vector1d, construct_Vector1d)
#-------------------------------------------------------------------------------
# Vector2d
#-------------------------------------------------------------------------------
def construct_Vector2d(x, y):
return Vector2d(x, y)
def reduce_Vector2d(obj):
return construct_Vector2d, (obj.x, obj.y)
copy_reg.pickle(type(Vector2d()), reduce_Vector2d, construct_Vector2d)
#-------------------------------------------------------------------------------
# Vector3d
#-------------------------------------------------------------------------------
def construct_Vector3d(x, y, z):
return Vector3d(x, y, z)
def reduce_Vector3d(obj):
return construct_Vector3d, (obj.x, obj.y, obj.z)
copy_reg.pickle(type(Vector3d()), reduce_Vector3d, construct_Vector3d)
#-------------------------------------------------------------------------------
# Tensor1d
#-------------------------------------------------------------------------------
def construct_Tensor1d(xx):
return Tensor1d(xx)
def reduce_Tensor1d(obj):
return construct_Tensor1d, (obj.xx,)
copy_reg.pickle(type(Tensor1d()), reduce_Tensor1d, construct_Tensor1d)
#-------------------------------------------------------------------------------
# Tensor2d
#-------------------------------------------------------------------------------
def construct_Tensor2d(xx, xy,
yx, yy):
return Tensor2d(xx, xy,
yx, yy)
def reduce_Tensor2d(obj):
return construct_Tensor2d, (obj.xx, obj.xy,
obj.yx, obj.yy)
copy_reg.pickle(type(Tensor2d()), reduce_Tensor2d, construct_Tensor2d)
#-------------------------------------------------------------------------------
# Tensor3d
#-------------------------------------------------------------------------------
def construct_Tensor3d(xx, xy, xz,
yx, yy, yz,
zx, zy, zz):
return Tensor3d(xx, xy, xz,
yx, yy, yz,
zx, zy, zz)
def reduce_Tensor3d(obj):
return construct_Tensor3d, (obj.xx, obj.xy, obj.xz,
obj.yx, obj.yy, obj.yz,
obj.zx, obj.zy, obj.zz)
copy_reg.pickle(type(Tensor3d()), reduce_Tensor3d, construct_Tensor3d)
#-------------------------------------------------------------------------------
# SymTensor1d
#-------------------------------------------------------------------------------
def construct_SymTensor1d(xx):
return SymTensor1d(xx)
def reduce_SymTensor1d(obj):
return construct_SymTensor1d, (obj.xx,)
copy_reg.pickle(type(SymTensor1d()), reduce_SymTensor1d, construct_SymTensor1d)
#-------------------------------------------------------------------------------
# SymTensor2d
#-------------------------------------------------------------------------------
def construct_SymTensor2d(xx, xy,
yx, yy):
return SymTensor2d(xx, xy,
yx, yy)
def reduce_SymTensor2d(obj):
return construct_SymTensor2d, (obj.xx, obj.xy,
obj.yx, obj.yy)
copy_reg.pickle(type(SymTensor2d()), reduce_SymTensor2d, construct_SymTensor2d)
#-------------------------------------------------------------------------------
# SymTensor3d
#-------------------------------------------------------------------------------
def construct_SymTensor3d(xx, xy, xz,
yx, yy, yz,
zx, zy, zz):
return SymTensor3d(xx, xy, xz,
yx, yy, yz,
zx, zy, zz)
def reduce_SymTensor3d(obj):
return construct_SymTensor3d, (obj.xx, obj.xy, obj.xz,
obj.yx, obj.yy, obj.yz,
obj.zx, obj.zy, obj.zz)
copy_reg.pickle(type(SymTensor3d()), reduce_SymTensor3d, construct_SymTensor3d)
#-------------------------------------------------------------------------------
# ThirdRankTensor1d
#-------------------------------------------------------------------------------
def construct_ThirdRankTensor1d(x00):
result = ThirdRankTensor1d()
for i in xrange(1):
result[i] = eval("x%02i" % i)
return result
def reduce_ThirdRankTensor1d(obj):
return construct_ThirdRankTensor1d, tuple(obj[i] for i in xrange(1))
copy_reg.pickle(type(ThirdRankTensor1d()), reduce_ThirdRankTensor1d, construct_ThirdRankTensor1d)
#-------------------------------------------------------------------------------
# ThirdRankTensor2d
#-------------------------------------------------------------------------------
def construct_ThirdRankTensor2d(x00, x01, x02, x03, x04, x05, x06, x07):
result = ThirdRankTensor2d()
for i in xrange(8):
result[i] = eval("x%02i" % i)
return result
def reduce_ThirdRankTensor2d(obj):
return construct_ThirdRankTensor2d, tuple(obj[i] for i in xrange(8))
copy_reg.pickle(type(ThirdRankTensor2d()), reduce_ThirdRankTensor2d, construct_ThirdRankTensor2d)
#-------------------------------------------------------------------------------
# ThirdRankTensor3d
#-------------------------------------------------------------------------------
def construct_ThirdRankTensor3d(x00, x01, x02, x03, x04, x05, x06, x07, x08, x09,
x10, x11, x12, x13, x14, x15, x16, x17, x18, x19,
x20, x21, x22, x23, x24, x25, x26):
result = ThirdRankTensor3d()
for i in xrange(27):
result[i] = eval("x%02i" % i)
return result
def reduce_ThirdRankTensor3d(obj):
return construct_ThirdRankTensor3d, tuple(obj[i] for i in xrange(27))
copy_reg.pickle(type(ThirdRankTensor3d()), reduce_ThirdRankTensor3d, construct_ThirdRankTensor3d)
#-------------------------------------------------------------------------------
# Box1d
#-------------------------------------------------------------------------------
def construct_Box1d(encoded_string):
return unpackElementBox1d(encoded_string)
def reduce_Box1d(obj):
return construct_Box1d, (packElementBox1d(obj),)
copy_reg.pickle(type(Box1d()), reduce_Box1d, construct_Box1d)
#-------------------------------------------------------------------------------
# Polygon
#-------------------------------------------------------------------------------
def construct_Polygon(encoded_string):
return unpackElementPolygon(encoded_string)
def reduce_Polygon(obj):
return construct_Polygon, (packElementPolygon(obj),)
copy_reg.pickle(type(Polygon()), reduce_Polygon, construct_Polygon)
#-------------------------------------------------------------------------------
# Polyhedron
#-------------------------------------------------------------------------------
def construct_Polyhedron(encoded_string):
return unpackElementPolyhedron(encoded_string)
def reduce_Polyhedron(obj):
return construct_Polyhedron, (packElementPolyhedron(obj),)
copy_reg.pickle(type(Polyhedron()), reduce_Polyhedron, construct_Polyhedron)
#------------------------------------------------------------------------------
# std::vectors
#------------------------------------------------------------------------------
vector_template = """
def reduce_vector_of_%(value_type)s(obj):
return construct_vector_of_%(value_type)s, (vector2string(obj),)
def construct_vector_of_%(value_type)s(strobj):
return string2vector_of_%(value_type)s(strobj)
copy_reg.pickle(vector_of_%(value_type)s, reduce_vector_of_%(value_type)s, construct_vector_of_%(value_type)s)
"""
for t in ("int", "unsigned", "ULL", "double", "string"):
# "Vector1d", "Vector2d", "Vector3d",
# "Tensor1d", "Tensor2d", "Tensor3d",
# "SymTensor1d", "SymTensor2d", "SymTensor3d",
# "ThirdRankTensor1d", "ThirdRankTensor2d", "ThirdRankTensor3d"):
exec(vector_template % {"value_type" : t})
```
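The net effect of these registrations is that the Spheral geometric types round-trip through pickle by value:
```python
# Round-trip a Vector3d through pickle using the reducers registered above.
import pickle
import SpheralPickle                      # executes the copy_reg registrations
from SpheralCompiledPackages import Vector3d

v = Vector3d(1.0, 2.0, 3.0)
v2 = pickle.loads(pickle.dumps(v))
assert (v2.x, v2.y, v2.z) == (1.0, 2.0, 3.0)
```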
#### File: Pybind11Wraps/SVPH/SVPHMOD.py
```python
from PYB11Generator import *
from SpheralCommon import *
from spheralDimensions import *
dims = spheralDimensions()
from SVPHFieldNames import *
from SVPHFacetedHydroBase import *
#-------------------------------------------------------------------------------
# Includes
#-------------------------------------------------------------------------------
PYB11includes += ['"SVPH/SVPHFieldNames.hh"',
'"SVPH/sampleFieldListSVPH.hh"',
'"SVPH/gradientFieldListSVPH.hh"',
'"SVPH/SVPHHydroBase.hh"',
'"SVPH/SVPHFacetedHydroBase.hh"',
'"Neighbor/ConnectivityMap.hh"',
'"FileIO/FileIO.hh"',
'"ArtificialViscosity/ArtificialViscosity.hh"']
#-------------------------------------------------------------------------------
# Namespaces
#-------------------------------------------------------------------------------
PYB11namespaces = ["Spheral"]
#-------------------------------------------------------------------------------
# Methods
#-------------------------------------------------------------------------------
@PYB11template("Dimension", "DataType")
def sampleFieldListSVPH(fieldList = "const FieldList<%(Dimension)s, %(DataType)s>&",
position = "const FieldList<%(Dimension)s, typename %(Dimension)s::Vector>&",
Hfield = "const FieldList<%(Dimension)s, typename %(Dimension)s::SymTensor>&",
connectivityMap = "const ConnectivityMap<%(Dimension)s>&",
W = "const TableKernel<%(Dimension)s>&",
mesh = "const Mesh<%(Dimension)s>&",
firstOrderConsistent = "const bool"):
"Use SVPH to sample a FieldList."
return "FieldList<%(Dimension)s, %(DataType)s>"
@PYB11template("Dimension", "DataType")
def gradientFieldListSVPH(fieldList = "const FieldList<%(Dimension)s, %(DataType)s>&",
position = "const FieldList<%(Dimension)s, typename %(Dimension)s::Vector>&",
Hfield = "const FieldList<%(Dimension)s, typename %(Dimension)s::SymTensor>&",
connectivityMap = "const ConnectivityMap<%(Dimension)s>&",
W = "const TableKernel<%(Dimension)s>&",
mesh = "const Mesh<%(Dimension)s>&",
firstOrderConsistent = "const bool"):
"Use SVPH to take the gradient of a FieldList."
return "FieldList<%(Dimension)s, typename MathTraits<%(Dimension)s, %(DataType)s>::GradientType>"
#-------------------------------------------------------------------------------
# Instantiate our types
#-------------------------------------------------------------------------------
for ndim in dims:
exec('''
SVPHFacetedHydroBase%(ndim)id = PYB11TemplateClass(SVPHFacetedHydroBase, template_parameters="%(Dimension)s")
''' % {"ndim" : ndim,
"Dimension" : "Dim<" + str(ndim) + ">"})
# SVPH interpolation
for element in ("Dim<%i>::Scalar" % ndim,
"Dim<%i>::Vector" % ndim,
"Dim<%i>::Tensor" % ndim,
"Dim<%i>::SymTensor" % ndim):
exec('''
sampleFieldListSVPH%(label)s = PYB11TemplateFunction(sampleFieldListSVPH, template_parameters=("%(Dimension)s", "%(element)s"), pyname="sampleFieldListSVPH")
''' % {"ndim" : ndim,
"Dimension" : "Dim<" + str(ndim) + ">",
"element" : element,
"label" : PYB11mangle(element)})
# SVPH gradient
for element in ("Dim<%i>::Scalar" % ndim,
"Dim<%i>::Vector" % ndim):
exec('''
gradientFieldListSVPH%(label)s = PYB11TemplateFunction(gradientFieldListSVPH, template_parameters=("%(Dimension)s", "%(element)s"), pyname="gradientFieldListSVPH")
''' % {"ndim" : ndim,
"Dimension" : "Dim<" + str(ndim) + ">",
"element" : element,
"label" : PYB11mangle(element)})
```
#### File: Pybind11Wraps/testPYB11/testEnumsMOD.py
```python
from PYB11Generator import *
# List the files we want to include.
PYB11includes = ['<iostream>']
# We can specify arbitrary C++ to be inserted at the beginning of the file.
PYB11preamble = """
namespace Aspace {
class A {
public:
A() { std::cerr << "A()" << std::endl; }
virtual ~A() { std::cerr << "~A()" << std::endl; }
virtual void do_something() const { std::cerr << "A::do_something" << std::endl; }
virtual void do_something_else() const { std::cerr << "A::do_something_else" << std::endl; }
virtual int yet_another_method() const { std::cerr << "A::yet_another_method" << std::endl; return 42; }
enum class Furniture { chair, bed, couch };
};
enum class Color { black, white, red, blue, yellow };
}
namespace Bspace {
template<typename T1, typename T2>
class B {
public:
B() { std::cerr << "B<T1, T2>()" << std::endl; }
enum class Rodent { mouse, squirrel, gerbil };
};
}
"""
#-------------------------------------------------------------------------------
# A
#-------------------------------------------------------------------------------
@PYB11namespace("Aspace")
class A:
def pyinit(self):
"Default constructor."
@PYB11virtual
@PYB11const
def do_something(self):
"A virtual do_something method."
return "void"
@PYB11virtual
@PYB11const
def do_something_else(self):
"A virtual do_something_else method."
return "void"
@PYB11virtual
@PYB11const
def yet_another_method(self):
"A virtual yet_another_method."
return "int"
Furniture = PYB11enum(("chair", "bed", "couch"))
#-------------------------------------------------------------------------------
# B
#-------------------------------------------------------------------------------
@PYB11namespace("Bspace")
@PYB11template("T1", "T2")
class B:
def pyinit(self):
"Default constructor B<%(T1)s, %(T2)s>."
Rodent = PYB11enum(("mouse", "squirrel", "gerbil"))
# B<int, double>
Bintdouble = PYB11TemplateClass(B, template_parameters=("int", "double"))
#-------------------------------------------------------------------------------
# Color
#-------------------------------------------------------------------------------
Color = PYB11enum(("black", "white", "red", "blue", "yellow"),
namespace="Aspace")
#-------------------------------------------------------------------------------
# Attributes.
#-------------------------------------------------------------------------------
the_answer = PYB11attr("42")
what = PYB11attr('py::cast("The world")')
```
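Once compiled, the generated module (assumed here to be named `testEnums`, following the MOD-file naming convention) exposes the enums both at module scope and nested in their owning classes:
```python
# Hypothetical use of the compiled test module.
import testEnums

print testEnums.the_answer                  # 42
print testEnums.what                        # "The world"
print testEnums.Color.red                   # module-scope enum from Aspace
print testEnums.A.Furniture.couch           # enum nested in class A
print testEnums.Bintdouble.Rodent.gerbil    # enum nested in B<int, double>
```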
#### File: Pybind11Wraps/Utilities/DomainNode.py
```python
from PYB11Generator import *
@PYB11template("Dimension")
class DomainNode:
#...........................................................................
# Attributes
localNodeID = PYB11readwrite()
uniqueLocalNodeID = PYB11readwrite()
globalNodeID = PYB11readwrite()
nodeListID = PYB11readwrite()
domainID = PYB11readwrite()
work = PYB11readwrite()
position = PYB11readwrite()
packSize = PYB11property("size_t", static=True)
pack = PYB11property("std::vector<double>")
#...........................................................................
# Comparators
def __eq__(self):
return
def __ne__(self):
return
```
#### File: Pybind11Wraps/Utilities/QuadraticInterpolator.py
```python
from PYB11Generator import *
class QuadraticInterpolator:
"""Encapsulates the algorithm and data for parabolic interpolation in 1D
Assumes the results is interpolated as y_interp = a + b*x + c*x^2"""
def pyinit(self):
"Default constuctor -- returns a non-functional interpolator until initialized"
return
def pyinit1(self,
xmin = "const double",
xmax = "const double",
yvals = "const std::vector<double>&"):
"Returns an interpolator for yvals sampled in x in [xmin, xmax]"
return
def initialize(self,
               xmin = "const double",
               xmax = "const double",
               yvals = "const std::vector<double>&"):
"Initializes the interpolator for yvals sampled in x in [xmin, xmax]"
return "void"
@PYB11const
def __call__(self,
x = "const double"):
"Returns the interpolated value <y>(x)"
return "double"
@PYB11const
def prime(self,
x = "const double"):
"Interpolator for the first derivative: <dy/dx>(x)"
return "double"
@PYB11const
def prime2(self,
x = "const double"):
"Interpolator for the second derivative: <d^2y/dx^2>(x)"
return "double"
@PYB11pyname("__call__")
@PYB11const
def __call__i0(self,
x = "const double",
i0 = "const size_t"):
"Returns the interpolated value <y>(x)"
return "double"
@PYB11pycppname("prime")
@PYB11const
def prime_i0(self,
x = "const double",
i0 = "const size_t"):
"Interpolator for the first derivative: <dy/dx>(x)"
return "double"
@PYB11pycppname("prime2")
@PYB11const
def prime2_i0(self,
x = "const double",
i0 = "const size_t"):
"Interpolator for the second derivative: <d^2y/dx^2>(x)"
return "double"
@PYB11const
def lowerBound(self,
x = "const double"):
"Return the lower bound index in the table for the given x coordinate"
return "size_t"
# Attributes
size = PYB11property(doc="The size of the tabulated coefficient arrays")
xmin = PYB11property(doc="Minimum x coordinate for table")
xmax = PYB11property(doc="Maximum x coordinate for table")
xstep = PYB11property(doc="delta x between tabulated values")
coeffs = PYB11property(doc="the fitting coefficients")
```
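A usage sketch of the generated class (module and `vector_of_double` binding names assumed): fit y = x^2 on [0, 1] and check the interpolant and its derivatives.
```python
# Hypothetical use of the generated QuadraticInterpolator bindings.
from SpheralCompiledPackages import QuadraticInterpolator, vector_of_double

n = 50
yvals = vector_of_double()
for i in xrange(n + 1):
    x = float(i)/n
    yvals.append(x*x)
f = QuadraticInterpolator(0.0, 1.0, yvals)
print f(0.5), f.prime(0.5), f.prime2(0.5)   # ~0.25, ~1.0, ~2.0
```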
#### File: src/SimulationControl/bisectFunction.py
```python
def bisectFunction(functor, x1, x2, ytarget, xaccuracy):
"""Bisect on [x1, x2] for the x where functor(x) == ytarget, to within xaccuracy."""
y1 = functor(x1)
y2 = functor(x2)
assert (y2 - y1)*(y2 - ytarget) >= 0.0    # ytarget must be bracketed by [y1, y2]
while abs(x2 - x1) > xaccuracy:
xmid = 0.5*(x1 + x2)
ymid = functor(xmid)
dymid = ymid - ytarget
dytar = ytarget - y1
if dymid*dytar > 0.0:
x2 = xmid
else:
x1 = xmid
y1 = ymid
return 0.5*(x1 + x2)
```
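For example, the square root of 2 is the x in [0, 2] where x**2 reaches the target value 2:
```python
from bisectFunction import bisectFunction

root = bisectFunction(lambda x: x*x, 0.0, 2.0, 2.0, 1.0e-12)
print root    # ~1.4142135623...
```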
#### File: src/SimulationControl/buildOrderedDict.py
```python
import collections
def buildOrderedDict(*args):
result = collections.OrderedDict()
for (key, val) in args:
result[key] = val
return result
```
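Usage is just a sequence of (key, value) pairs whose insertion order is preserved:
```python
from buildOrderedDict import buildOrderedDict

d = buildOrderedDict(("rho", 1.0),
                     ("eps", 0.0))
print d.keys()    # ['rho', 'eps'] -- in the order given
```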
#### File: src/SimulationControl/EulerianTracerHistory.py
```python
import os
import mpi
import Spheral
from NodeHistory import NodeHistory
class EulerianTracerHistory(Spheral.RestartableObject):
# Constructor.
def __init__(self,
geometry,
position,
samplefunc,
W,
db,
filename,
header = None,
labels = None,
initializefunc = None,
weightfunc = None,
):
self.restart = Spheral.RestartableObject(self)
# Set up our internal data.
self.geometry = geometry
self.position = position
self.samplefunc = samplefunc
self.initializefunc = initializefunc
self.weightfunc = weightfunc
self.W = W
self.db = db
self.filename = filename
self.labels = labels
self.cycleHistory = []
self.timeHistory = []
self.sampleHistory = []
# Open the history file.
self.file = None
if mpi.rank == 0:
if os.path.exists(self.filename):
os.remove(self.filename)
self.file = open(self.filename, "w")
assert self.file is not None
# Write the optional header string.
if header:
self.file.write(header + "\n")
# Write the optional label line
if labels:
self.file.write(("# " + ((len(labels) + 2)*'"%20s" ') + "\n") % (("cycle", "time") + labels))
return
# This is the method you add to periodic work.
def sample(self, cycle, ttime, dt):
# Import the geometry appropriate Spheral types.
assert self.geometry in ("1d", "2d", "3d", "RZ")
exec("from Spheral%s import *" % self.geometry)
# Do we need to initialize anything?
if self.initializefunc:
self.initializefunc()
# How many sample values are we going for?
nvals = 0    # guard against the degenerate case of no local nodes
for nodeListi, nodeList in enumerate(self.db.fluidNodeLists()):
if nodeList.numNodes > 0:
nvals = len(self.samplefunc(nodeListi, 0))
assert nvals > 0
# Prepare empty slots in the history.
self.cycleHistory.append(cycle)
self.timeHistory.append(ttime)
self.sampleHistory.append([0.0]*nvals)
Wsum = 0.0
# Grab position and H FieldLists.
positions = self.db.globalPosition
H = self.db.globalHfield
Hmin = 1e60*SymTensor.one # Since H is in inverse length, need a big number
# Prepare the Neighbor information for sampling at this pos, and walk the neighbors.
numNodeLists = self.db.numFluidNodeLists
masterLists = vector_of_vector_of_int()
coarseNeighbors = vector_of_vector_of_int()
refineNeighbors = vector_of_vector_of_int()
self.db.setMasterNodeLists(self.position, Hmin, masterLists, coarseNeighbors)
assert len(coarseNeighbors) == numNodeLists
self.db.setRefineNodeLists(self.position, Hmin, coarseNeighbors, refineNeighbors)
for nodeListj in xrange(numNodeLists):
for j in refineNeighbors[nodeListj]:
# Compute the weighting for this position.
posj = positions(nodeListj, j)
Hj = H(nodeListj, j)
Wj = self.W.kernelValue((Hj*(posj - self.position)).magnitude(), 1.0)**2
if self.weightfunc:
Wj *= self.weightfunc(posj)
Wsum += Wj
# Use the user supplied method to extract the field values for this (nodeList, index)
fieldvals = self.samplefunc(nodeListj, j)
assert len(fieldvals) == nvals
# Increment the sampled values for this position.
for i in xrange(nvals):
self.sampleHistory[-1][i] += Wj*fieldvals[i]
# Normalize the measurements.
Wsum = max(1.0e-10, mpi.allreduce(Wsum, mpi.SUM))
for i in xrange(nvals):
self.sampleHistory[-1][i] = mpi.allreduce(self.sampleHistory[-1][i], mpi.SUM)/Wsum
# Update the history file.
if mpi.rank == 0:
assert self.file is not None
samplestr = ""
for x in self.sampleHistory[-1]:
samplestr += str(x) + " "
self.file.write("%i \t %g \t %s\n" % (cycle, ttime, samplestr))
self.file.flush()
return
# Recreate the output file, flushing our full history to it.
def flushHistory(self):
if mpi.rank == 0:
assert self.file is not None
n = len(self.cycleHistory)
assert len(self.timeHistory) == n
assert len(self.sampleHistory) == n
if mpi.rank == 0:
for i in xrange(n):
samplestr = ""
for x in self.sampleHistory[i]:
samplestr += str(x) + " "
self.file.write("%i \t %g \t %s\n" % (self.cycleHistory[i],
self.timeHistory[i],
samplestr))
self.file.flush()
return
# Label for restart.
def label(self):
return "EulerianTracerHistory"
# Write restart.
def dumpState(self, file, path):
file.writeObject(self.filename, path + "/filename")
file.writeObject(self.cycleHistory, path + "/cycleHistory")
file.writeObject(self.timeHistory, path + "/timeHistory")
file.writeObject(self.sampleHistory, path + "/sampleHistory")
return
# Read restart.
def restoreState(self, file, path):
self.filename = file.readObject(path + "/filename")
self.cycleHistory = file.readObject(path + "/cycleHistory")
self.timeHistory = file.readObject(path + "/timeHistory")
self.sampleHistory = file.readObject(path + "/sampleHistory")
self.flushHistory()
return
```
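A construction sketch (the `db`, `WT`, and `Vector2d` objects are assumed to exist): the user supplies a sampling functor taking (nodeList index, node index) and returning the tuple of values to record at the fixed Eulerian position.
```python
# Hypothetical setup of a tracer at a fixed 2d position.
def samplefunc(nodeListi, i):
    nodes = db.fluidNodeLists()[nodeListi]
    return (nodes.massDensity()[i],
            nodes.specificThermalEnergy()[i])

tracer = EulerianTracerHistory(geometry = "2d",
                               position = Vector2d(0.0, 0.0),
                               samplefunc = samplefunc,
                               W = WT,          # a TableKernel
                               db = db,
                               filename = "tracer.history",
                               labels = ("rho", "eps"))
# then register tracer.sample as periodic work with the simulation controller
```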
#### File: src/SimulationControl/newtonRaphson.py
```python
from SpheralTestUtilities import *
def newtonRaphson(functor,
x1,
x2,
xaccuracy = 1.0e-15,
yaccuracy = 1.0e-15,
maxIterations = 100):
#- Initialize values for the function and its derivative.
xminValues = functor(x1);
xmaxValues = functor(x2);
#- Is the root already at the min or max range?
if fuzzyEqual(xminValues.first, 0.0, yaccuracy):
return x1
if fuzzyEqual(xmaxValues.first, 0.0, yaccuracy):
return x2
#- Make sure the root is bracketed by the input range.
if distinctlyGreaterThan(xminValues.first * xmaxValues.first, 0.0, yaccuracy):
raise RuntimeError, "newtonRaphson ERROR: root must be bracketed by input range: (%g %g) (%g %g) " % (xminValues.first,
xminValues.second,
xmaxValues.first,
xmaxValues.second)
#- Initialize the searching parameters.
xl = 0.0
xh = 0.0
if xminValues.first < 0.0:
xl = x1
xh = x2
else:
assert xminValues.first > 0.0 and xmaxValues.first < 0.0
xl = x2;
xh = x1;
rootSafe = 0.5*(x1 + x2)
dxold = abs(x2 - x1)
dx = dxold
fdf = functor(rootSafe)
f = fdf.first
df = fdf.second
#- Iterate until we either converge or achieve the desired accuracy.
iter = 0
while iter < maxIterations:
iter += 1
#- Bisect if Newton out of range or not decreasing fast enough.
if (((rootSafe - xh)*df - f)*((rootSafe - xl)*df - f) > 0.0 or
abs(2.0*f) > abs(dxold*df)):
dxold = dx
dx = 0.5*(xh - xl)
rootSafe = xl + dx
if (fuzzyEqual(xl, rootSafe, xaccuracy)):
return rootSafe
else:
#- Take a Newton-Raphson step.
assert not fuzzyEqual(df, 0.0)
dxold = dx
dx = f/df
tmp = rootSafe
rootSafe -= dx
if fuzzyEqual(tmp, rootSafe, xaccuracy):
return rootSafe
if abs(dx) <= xaccuracy:
return rootSafe
fdf = functor(rootSafe)
f = fdf.first
df = fdf.second
if f < 0.0:
xl = rootSafe
else:
xh = rootSafe
raise "newtonRaphson ERROR: did not converge!"
```
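The functor must return a pair-like object carrying f(x) in .first and f'(x) in .second. A sketch finding the root of f(x) = x**2 - 2:
```python
from newtonRaphson import newtonRaphson

class FunctionAndDerivative:
    def __init__(self, first, second):
        self.first = first      # f(x)
        self.second = second    # f'(x)

root = newtonRaphson(lambda x: FunctionAndDerivative(x*x - 2.0, 2.0*x), 0.0, 2.0)
print root    # ~1.4142135623...
```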
#### File: src/SimulationControl/NodeHistory.py
```python
from math import *
import Spheral
import mpi
#-------------------------------------------------------------------------------
# A class for tracking the history of a given set of nodes.
#-------------------------------------------------------------------------------
class NodeHistory:
def __init__(self,
nodeList,
nodeIndices,
sampleMethod,
filename,
header = None,
labels = None):
self.restart = Spheral.RestartableObject(self)
self.nodeList = nodeList
self.sampleMethod = sampleMethod
self.filename = filename
self.cycleHistory = []
self.timeHistory = []
self.sampleHistory = []
# Figure out the dimensionality.
FieldConstructor = None
if isinstance(nodeList, Spheral.NodeList1d):
FieldConstructor = Spheral.IntField1d
elif isinstance(nodeList, Spheral.NodeList2d):
FieldConstructor = Spheral.IntField2d
elif isinstance(nodeList, Spheral.NodeList3d):
FieldConstructor = Spheral.IntField3d
assert FieldConstructor is not None
# Store the set of nodes we're going to sample as a field of flags.
# This should automatically be safe as NodeLists/Fields get renumbered,
# redistributed, deleted, added, or what have you.
self.nodeFlags = FieldConstructor("flag nodes", nodeList, 0)
if nodeIndices is None:
nodeIndices = range(nodeList.numInternalNodes)
self.nodeIndices = nodeIndices
if isinstance(nodeIndices, list):
for i in nodeIndices:
assert i >= 0 and i < nodeList.numInternalNodes
self.nodeFlags[i] = 1
else:
self.currentNodeIndices()
# Open the history file.
self.file = None
if mpi.rank == 0:
self.file = open(self.filename, "w")
assert self.file is not None
# Write the optional header string.
if header:
self.file.write(header + "\n")
# Write the optional label line
if labels:
self.file.write(("# " + ((len(labels) + 2)*'"%20s" ') + "\n") % (("cycle", "time") + labels))
return
def currentNodeIndices(self):
if isinstance(self.nodeIndices, list):
return [i for i in range(self.nodeList.numInternalNodes)
if self.nodeFlags[i] == 1]
else:
result = self.nodeIndices(self.nodeList)
self.nodeFlags.Zero()
for i in result:
assert i >= 0 and i < self.nodeList.numInternalNodes
self.nodeFlags[i] = 1
return result
def sample(self, cycle, t, dt):
# Get the set of nodes.
nodeIndices = self.currentNodeIndices()
# Get the result of the sampling method.
result = self.sampleMethod(self.nodeList, nodeIndices)
# Update our history variables.
self.cycleHistory.append(cycle)
self.timeHistory.append(t)
self.sampleHistory.append(result)
# Update the history file.
if mpi.rank == 0:
assert self.file is not None
if isinstance(result, tuple):
samplestr = ""
for x in result:
samplestr += str(x) + " "
else:
samplestr = str(result)
self.file.write("%i \t %g \t %s\n" % (cycle, t, samplestr))
self.file.flush()
return
def flushHistory(self):
if mpi.rank == 0:
assert self.file is not None
n = len(self.cycleHistory)
assert len(self.timeHistory) == n
assert len(self.sampleHistory) == n
if mpi.rank == 0:
for i in xrange(n):
if isinstance(self.sampleHistory[i], tuple):
samplestr = ""
for x in self.sampleHistory[i]:
samplestr += str(x) + " "
else:
samplestr = str(self.sampleHistory[i])
self.file.write("%i \t %g \t %s\n" % (self.cycleHistory[i],
self.timeHistory[i],
samplestr))
self.file.flush()
return
def label(self):
return "NodeHistory"
def dumpState(self, file, path):
file.writeObject(self.filename, path + "/filename")
file.writeObject(self.cycleHistory, path + "/cycleHistory")
file.writeObject(self.timeHistory, path + "/timeHistory")
file.writeObject(self.sampleHistory, path + "/sampleHistory")
file.write(self.nodeFlags, path + "/nodeFlags")
return
def restoreState(self, file, path):
try:
self.filename = file.readObject(path + "/filename")
self.cycleHistory = file.readObject(path + "/cycleHistory")
self.timeHistory = file.readObject(path + "/timeHistory")
self.sampleHistory = file.readObject(path + "/sampleHistory")
file.read(self.nodeFlags, path + "/nodeFlags")
self.flushHistory()
except RuntimeError:
print "WARNING: unable to restore NodeHistory restart state"
return
def __call__(self, cycle, t, dt):
self.sample(cycle, t, dt)
return
```
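A usage sketch (the `nodes` NodeList is assumed to exist): the sample method receives the NodeList and the current list of flagged node indices, and returns a value or tuple to record per call.
```python
# Hypothetical: track the mass density of nodes 0-4 of an existing NodeList.
from NodeHistory import NodeHistory

def sampleMethod(nodeList, indices):
    rho = nodeList.massDensity()
    return tuple(rho[i] for i in indices)

history = NodeHistory(nodes, range(5), sampleMethod, "nodes0-4.history",
                      labels = tuple("rho%i" % i for i in range(5)))
# register history.sample (or the instance itself, via __call__)
# as periodic work with the simulation controller
```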
#### File: src/SimulationControl/resampleNodeList.py
```python
import mpi
import VoronoiDistributeNodes
import SolidSpheral
#...........................................................................
# A local helper method for copying data from one NodeList to another.
#...........................................................................
def copyNodeListFields(nodes0, nodes1, mask, solid):
m0 = nodes0.mass()
p0 = nodes0.positions()
v0 = nodes0.velocity()
H0 = nodes0.Hfield()
r0 = nodes0.massDensity()
e0 = nodes0.specificThermalEnergy()
m1 = nodes1.mass()
p1 = nodes1.positions()
v1 = nodes1.velocity()
H1 = nodes1.Hfield()
r1 = nodes1.massDensity()
e1 = nodes1.specificThermalEnergy()
if solid:
S0 = nodes0.deviatoricStress()
ps0 = nodes0.plasticStrain()
psr0 = nodes0.plasticStrainRate()
D0 = nodes0.damage()
S1 = nodes1.deviatoricStress()
ps1 = nodes1.plasticStrain()
psr1 = nodes1.plasticStrainRate()
D1 = nodes1.damage()
j = 0
for i in xrange(nodes0.numInternalNodes):
if mask[i] == 1:
assert j < nodes1.numInternalNodes
m1[j] = m0[i]
p1[j] = p0[i]
v1[j] = v0[i]
H1[j] = H0[i]
r1[j] = r0[i]
e1[j] = e0[i]
if solid:
S1[j] = S0[i]
ps1[j] = ps0[i]
psr1[j] = psr0[i]
D1[j] = D0[i]
j += 1
return
#-------------------------------------------------------------------------------
# Resample to a new set of nodes represented by a generator.
#-------------------------------------------------------------------------------
def resampleNodeList(nodes,
generator,
W,
mask = None,
etaExclude = None,
removeUnusedNodes = True):
# Check our dimensionality
if isinstance(nodes, SolidSpheral.NodeList1d):
ndim = 1
elif isinstance(nodes, SolidSpheral.NodeList2d):
ndim = 2
elif isinstance(nodes, SolidSpheral.NodeList3d):
ndim = 3
else:
raise ValueError, "Unknown thing %s handed in: expected a NodeList" % nodes
ndim0 = ndim
exec "from SolidSpheral%id import *" % ndim # Load the aliases for our dimensionality
ndim = ndim0
exec "from VoronoiDistributeNodes import distributeNodes%id as distributor" % ndim
# Clear out any initial ghost nodes.
nodes.numGhostNodes = 0
# Check if we're doing a Solid or FluidNodeList.
if isinstance(nodes, SolidNodeList):
solid = True
NLF = makeSolidNodeList
elif isinstance(nodes, FluidNodeList):
solid = False
NLF = makeFluidNodeList
else:
raise RuntimeError, "Unknown NodeList type."
# Check how to set the new neighbor info.
if isinstance(nodes._neighbor, NestedGridNeighbor):
topGridSize = nodes._neighbor.topGridSize
xmin = Vector.zero
xmax = Vector.one * topGridSize
NeighborType = NestedGridNeighbor
if mpi.procs > 1:
dbc = NestedGridDistributedBoundary.instance()
elif isinstance(nodes._neighbor, TreeNeighbor):
xmin = nodes._neighbor.xmin
xmax = nodes._neighbor.xmax
topGridSize = (xmax - xmin).maxAbsElement()
NeighborType = TreeNeighbor
if mpi.procs > 1:
dbc = BoundingVolumeDistributedBoundary.instance()
#raise RuntimeError, "Need a parallel policy for TreeNeighbor."
else:
raise RuntimeError, "Unknown Neighbor type."
# Build a temporary NodeList we'll use to sample to.
newnodes = NLF(name = "zza_newnodes",
eos = nodes.eos,
hmin = 1e-10,
hmax = 1e10,
NeighborType = NeighborType,
topGridCellSize = topGridSize,
xmin = xmin,
xmax = xmax)
if mask:
masknodes = NLF(name = "zzz_masknodes",
eos = nodes.eos,
hmin = 1e-10,
hmax = 1e10,
NeighborType = NeighborType,
topGridCellSize = topGridSize,
xmin = xmin,
xmax = xmax)
distributor((newnodes, generator))
# If we're parallel we need distributed ghost nodes.
bcs = vector_of_Boundary()
if mpi.procs > 1:
db = DataBase()
db.appendNodeList(nodes)
db.appendNodeList(newnodes)
nodes.neighbor().updateNodes()
newnodes.neighbor().updateNodes()
dbc.setAllGhostNodes(db)
dbc.finalizeGhostBoundary()
bcs.append(dbc)
# If we're masking some points, things get complicated. The mask nodes are going to persist to the new
# nodes, and so we need to not overlay them. We also want to remove any new nodes that overlap with the
# mask nodes, since the masked ones are going to be copied to the new nodes in the end.
nmask = 0
if mask:
# Copy the field values from the original masked nodes to the temporary mask set.
nmask = mask.localSumElements()
print "Copying %i masked nodes from the original NodeList." % mpi.allreduce(nmask, mpi.SUM)
masknodes.numInternalNodes = nmask
copyNodeListFields(nodes, masknodes, mask, solid)
# Remove the mask nodes from the starting NodeList.
nodes2kill = vector_of_int()
for i in xrange(nodes.numInternalNodes):
if mask[i] == 1:
nodes2kill.append(i)
assert nodes2kill.size() == nmask
nodes.deleteNodes(nodes2kill)
# Now we need to remove any nodes from the target set that overlap with the mask nodes.
db = DataBase()
db.appendNodeList(newnodes)
db.appendNodeList(masknodes)
newnodes.neighbor().updateNodes()
masknodes.neighbor().updateNodes()
if mpi.procs > 1:
dbc.setAllGhostNodes(db)
dbc.finalizeGhostBoundary()
newnodes.neighbor().updateNodes()
masknodes.neighbor().updateNodes()
db.updateConnectivityMap(False)
cm = db.connectivityMap()
if etaExclude is None:
etaExclude = 1.0/nodes.nodesPerSmoothingScale
assert etaExclude > 0.0
posmask = masknodes.positions()
Hmask = masknodes.Hfield()
posnew = newnodes.positions()
Hnew = newnodes.Hfield()
nodes2kill = vector_of_int()
for i in xrange(newnodes.numInternalNodes):
fullconnectivity = cm.connectivityForNode(0, i)
for j in fullconnectivity[1]:
eta = min(( Hnew[i]*(posmask[j] - posnew[i])).magnitude(),
(Hmask[j]*(posmask[j] - posnew[i])).magnitude())
if eta < etaExclude:
nodes2kill.append(i)
print "Removing %i nodes from new list due to overlap with masked nodes." % mpi.allreduce(len(nodes2kill), mpi.SUM)
newnodes.deleteNodes(nodes2kill)
# Build the connectivity so we can do the overlay.
db = DataBase()
db.appendNodeList(nodes)
db.appendNodeList(newnodes)
nodes.neighbor().updateNodes()
newnodes.neighbor().updateNodes()
if mpi.procs > 1:
dbc.setAllGhostNodes(db)
dbc.finalizeGhostBoundary()
nodes.neighbor().updateNodes()
newnodes.neighbor().updateNodes()
# Convert fields we're going to map to conserved values. This is necessary because the splat
# operation we're going to use guarantees that summing over the input and output field values
# gives the same value.
mass = nodes.mass()
rho = nodes.massDensity()
vol = ScalarField(nodes.mass())
vel = nodes.velocity()
eps = nodes.specificThermalEnergy()
momentum = VectorField(vel)
thermalenergy = ScalarField(eps)
for i in xrange(nodes.numNodes):
vol[i] /= rho[i] + 1.0e-30
momentum[i] *= mass[i]
thermalenergy[i] *= mass[i]
if solid:
S = nodes.deviatoricStress()
ps = nodes.plasticStrain()
D = nodes.damage()
mS = SymTensorField(S)
mps = ScalarField(ps)
mD = SymTensorField(D)
for i in xrange(nodes.numNodes):
mS[i] *= mass[i]
mps[i] *= mass[i]
mD[i] *= mass[i]
# Map stuff from the old to new nodes.
fls = FieldListSet()
mass_fl = ScalarFieldList()
vol_fl = ScalarFieldList()
momentum_fl = VectorFieldList()
thermalenergy_fl = ScalarFieldList()
mass_fl.appendField(mass)
vol_fl.appendField(vol)
momentum_fl.appendField(momentum)
thermalenergy_fl.appendField(thermalenergy)
mass_fl.copyFields()
vol_fl.copyFields()
momentum_fl.copyFields()
thermalenergy_fl.copyFields()
fls.ScalarFieldLists.append(mass_fl)
fls.ScalarFieldLists.append(vol_fl)
fls.VectorFieldLists.append(momentum_fl)
fls.ScalarFieldLists.append(thermalenergy_fl)
if solid:
S_fl = SymTensorFieldList()
ps_fl = ScalarFieldList()
D_fl = SymTensorFieldList()
S_fl.appendField(mS)
ps_fl.appendField(mps)
D_fl.appendField(mD)
S_fl.copyFields()
ps_fl.copyFields()
D_fl.copyFields()
fls.SymTensorFieldLists.append(S_fl)
fls.ScalarFieldLists.append(ps_fl)
fls.SymTensorFieldLists.append(D_fl)
pos0_fl = VectorFieldList()
mass0_fl = ScalarFieldList()
H0_fl = SymTensorFieldList()
pos0_fl.appendField(nodes.positions())
mass0_fl.appendField(nodes.mass())
H0_fl.appendField(nodes.Hfield())
pos1_fl = VectorFieldList()
mass1_fl = ScalarFieldList()
H1_fl = SymTensorFieldList()
pos1_fl.appendField(newnodes.positions())
mass1_fl.appendField(newnodes.mass())
H1_fl.appendField(newnodes.Hfield())
pos0_fl.copyFields()
mass0_fl.copyFields()
H0_fl.copyFields()
pos1_fl.copyFields()
mass1_fl.copyFields()
H1_fl.copyFields()
# Apply boundaries to the Fields we're sampling from.
for bc in bcs:
bc.applyFieldListGhostBoundary(mass0_fl)
bc.applyFieldListGhostBoundary(mass1_fl)
for fl in fls.ScalarFieldLists:
bc.applyFieldListGhostBoundary(fl)
for fl in fls.VectorFieldLists:
bc.applyFieldListGhostBoundary(fl)
for fl in fls.TensorFieldLists:
bc.applyFieldListGhostBoundary(fl)
for fl in fls.SymTensorFieldLists:
bc.applyFieldListGhostBoundary(fl)
bc.finalizeGhostBoundary()
print "Splatting fields..."
newfls = splatMultipleFieldsMash(fls,
pos0_fl, mass0_fl, H0_fl, W,
pos1_fl, mass1_fl, H1_fl,
bcs)
print "Done splatting."
# Grab the FieldLists
pos0 = nodes.positions()
H0 = nodes.Hfield()
pos1 = newnodes.positions()
H1 = newnodes.Hfield()
mass1 = newfls.ScalarFieldLists[0][0]
vol1 = newfls.ScalarFieldLists[1][0]
momentum1 = newfls.VectorFieldLists[0][0]
thermalenergy1 = newfls.ScalarFieldLists[2][0]
# Denormalize the mapped values and fill them in as new values for the nodes.
nodes.numInternalNodes = nmask + newnodes.numInternalNodes
for i in xrange(newnodes.numInternalNodes):
j = nmask + i
pos0[j] = pos1[i]
H0[j] = H1[i]
if mass1[i] > 0.0:
assert vol1[i] > 0.0
mass[j] = mass1[i]
rho[j] = mass1[i]/vol1[i]
vel[j] = momentum1[i]/mass1[i]
eps[j] = thermalenergy1[i]/mass1[i]
else:
mass[j] = newnodes.mass()[i]
rho[j] = newnodes.massDensity()[i]
vel[j] = newnodes.velocity()[i]
eps[j] = newnodes.specificThermalEnergy()[i]
if solid:
mS1 = newfls.SymTensorFieldLists[0][0]
mps1 = newfls.ScalarFieldLists[3][0]
mD1 = newfls.SymTensorFieldLists[1][0]
for i in xrange(newnodes.numInternalNodes):
j = nmask + i
if mass1[i] > 0.0:
S[j] = mS1[i]/mass1[i]
ps[j] = mps1[i]/mass1[i]
D[j] = mD1[i]/mass1[i]
# Look for any nodes that didn't get any information in the new set and delete them.
if removeUnusedNodes:
nodes2kill = vector_of_int()
for i in xrange(newnodes.numInternalNodes):
if mass1[i] == 0.0:
nodes2kill.append(i)
if nodes2kill.size() > 0:
newnodes.deleteNodes(nodes2kill)
# Insert any masked nodes, and we're done.
if mask:
newmask = [1]*nmask + [0]*nodes.numInternalNodes
copyNodeListFields(masknodes, nodes, newmask, solid)
# Whew!
print "Finished resampling nodes: final node count %i." % mpi.allreduce(nodes.numInternalNodes, mpi.SUM)
return
```
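A hedged usage sketch (generator arguments assumed from the usual Spheral generator API; `nodes` and `WT` assumed to exist): resample an existing 2d NodeList onto a lattice of points.
```python
# Hypothetical resampling of a 2d NodeList onto a 100x100 lattice.
from GenerateNodeDistribution2d import GenerateNodeDistribution2d

gen = GenerateNodeDistribution2d(100, 100, rho = 1.0,
                                 distributionType = "lattice",
                                 xmin = (0.0, 0.0),
                                 xmax = (1.0, 1.0),
                                 nNodePerh = 1.51)
resampleNodeList(nodes, gen, WT)    # WT: the sampling TableKernel
```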
#### File: src/SimulationControl/SpheralConservation.py
```python
import mpi
from SpheralCompiledPackages import *
#-------------------------------------------------------------------------------
# Conservation
#-------------------------------------------------------------------------------
class SpheralConservation:
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self, dataBase,
packages = []):
self.restart = RestartableObject(self)
self.dataBase = dataBase
self.packages = packages
self.cycleHistory = []
self.timeHistory = []
self.massHistory = []
self.pmomHistory = []
self.amomHistory = []
self.KEHistory = []
self.TEHistory = []
self.EEHistory = []
self.EHistory = []
self.Vector = eval("Vector%id" % dataBase.nDim)
self.origin = self.Vector()
# Start the conservation history
self.updateHistory()
return
#---------------------------------------------------------------------------
# Add the current state to the history variables.
#---------------------------------------------------------------------------
def updateHistory(self, cycle=0, time=0.0):
self.cycleHistory.append(cycle)
self.timeHistory.append(time)
self.massHistory.append(self.findTotalMass())
self.pmomHistory.append(self.findTotalPmom())
self.amomHistory.append(self.findTotalAmom())
self.KEHistory.append(self.findTotalKE())
self.TEHistory.append(self.findTotalTE())
self.EEHistory.append(self.findTotalPackageEnergy())
self.EHistory.append(self.KEHistory[-1] +
self.TEHistory[-1] +
self.EEHistory[-1])
return
#---------------------------------------------------------------------------
# Determine the current total mass.
#---------------------------------------------------------------------------
def findTotalMass(self):
total = 0.0
massFL = self.dataBase.globalMass
for mass in massFL:
massValues = mass.internalValues()
total += sum(list(massValues) + [0.0])
return mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total linear momentum.
#---------------------------------------------------------------------------
def findTotalPmom(self):
total = self.Vector()
massFL = self.dataBase.globalMass
velocityFL = self.dataBase.globalVelocity
for (mass, velocity) in zip(massFL, velocityFL):
massValues = mass.internalValues()
velocityValues = velocity.internalValues()
for mi, vi in zip(massValues, velocityValues):
total += mi*vi
# Tally momentum from packages.
for package in self.packages:
packageValue = package.extraMomentum()
total += packageValue
return mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total angular momentum, with reference to the
# stored origin.
#---------------------------------------------------------------------------
def findTotalAmom(self):
total = Vector3d()   # angular momentum is accumulated as a 3-vector regardless of dimension
massFL = self.dataBase.globalMass
positionFL = self.dataBase.globalPosition
velocityFL = self.dataBase.globalVelocity
for (mass, position, velocity) in zip(massFL, positionFL, velocityFL):
massValues = mass.internalValues()
positionValues = position.internalValues()
velocityValues = velocity.internalValues()
for (mi, ri, vi) in zip(massValues, positionValues, velocityValues):
# Find the displacement of this node from the origin.
dr = ri - self.origin
# Now add this node angular momentum.
if self.dataBase.nDim == 2:
total.z += mi*(dr.x*vi.y - dr.y*vi.x)
elif self.dataBase.nDim == 3:
total += mi * dr.cross(vi)
return mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total kinetic energy.
#---------------------------------------------------------------------------
def findTotalKE(self):
total = 0.0
massFL = self.dataBase.globalMass
velocityFL = self.dataBase.globalVelocity
for (mass, velocity) in zip(massFL, velocityFL):
massValues = mass.internalValues()
velocityValues = velocity.internalValues()
total += sum([mi*vi.magnitude2() for (mi, vi) in zip(massValues, velocityValues)] + [0.0])
return 0.5*mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total thermal energy.
#---------------------------------------------------------------------------
def findTotalTE(self):
total = 0.0
massFL = self.dataBase.fluidMass
epsFL = self.dataBase.fluidSpecificThermalEnergy
for (mass, eps) in zip(massFL, epsFL):
massValues = mass.internalValues()
epsValues = eps.internalValues()
            total += sum([mi*epsi for (mi, epsi) in zip(massValues, epsValues)] + [0.0])
return mpi.allreduce(total, mpi.SUM)
#---------------------------------------------------------------------------
# Determine the current total package (or "external") energy.
#---------------------------------------------------------------------------
def findTotalPackageEnergy(self):
total = 0.0
for package in self.packages:
total += package.extraEnergy()
return total # Note we assume this has already been parallel summed.
#---------------------------------------------------------------------------
# Write the history to the given file.
#---------------------------------------------------------------------------
def writeHistory(self, filename):
f = open(filename, 'w')
labels = ['"cycle"', '"time"',
'"Mass"',
'"Lin Mom Mag"', '"Lin Mom X"', '"Lin Mom Y"', '"Lin Mom Z"',
'"Ang Mom Mag"', '"Ang Mom X"', '"Ang Mom Y"', '"Ang Mom Z"',
'"Total E"', '"Kin E"', '"Therm E"', '"Pkg E"']
f.write('#')
for lab in labels:
f.write('%14s ' % lab)
f.write('\n')
for i in xrange(len(self.cycleHistory)):
for var in [self.cycleHistory[i], self.timeHistory[i],
self.massHistory[i],
self.pmomHistory[i].magnitude(),
self.pmomHistory[i].x,
                        # 1-D/2-D linear momenta lack y/z components; pad with 0.
                        getattr(self.pmomHistory[i], "y", 0.0),
                        getattr(self.pmomHistory[i], "z", 0.0),
self.amomHistory[i].magnitude(),
self.amomHistory[i].x,
self.amomHistory[i].y,
self.amomHistory[i].z,
self.EHistory[i],
self.KEHistory[i],
self.TEHistory[i],
self.EEHistory[i]]:
f.write('%14.8g ' % var)
f.write('\n')
f.close()
return
#---------------------------------------------------------------------------
# label
#---------------------------------------------------------------------------
def label(self):
return "SpheralConservation"
#---------------------------------------------------------------------------
# dumpState
#---------------------------------------------------------------------------
def dumpState(self, file, path):
file.writeObject(self.cycleHistory, path + "/cycleHistory")
file.writeObject(self.timeHistory, path + "/timeHistory")
file.writeObject(self.massHistory, path + "/massHistory")
file.writeObject(self.pmomHistory, path + "/pmomHistory")
file.writeObject(self.amomHistory, path + "/amomHistory")
file.writeObject(self.KEHistory, path + "/KEHistory")
file.writeObject(self.TEHistory, path + "/TEHistory")
file.writeObject(self.EEHistory, path + "/EEHistory")
file.writeObject(self.EHistory, path + "/EHistory")
file.writeObject(self.origin, path + "/origin")
#---------------------------------------------------------------------------
# restoreState
#---------------------------------------------------------------------------
def restoreState(self, file, path):
self.cycleHistory = file.readObject(path + "/cycleHistory")
self.timeHistory = file.readObject(path + "/timeHistory")
self.massHistory = file.readObject(path + "/massHistory")
self.pmomHistory = file.readObject(path + "/pmomHistory")
self.amomHistory = file.readObject(path + "/amomHistory")
self.KEHistory = file.readObject(path + "/KEHistory")
self.TEHistory = file.readObject(path + "/TEHistory")
self.EEHistory = file.readObject(path + "/EEHistory")
self.EHistory = file.readObject(path + "/EHistory")
self.origin = file.readObject(path + "/origin")
```
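A minimal usage sketch for the conservation tracker above (its `label` suggests the class is named `SpheralConservation`). The driver names here (`db`, `packages`, `integrator`, `dt`) are assumptions for illustration; in a real run a controller object typically owns this tracker and calls `updateHistory` each cycle.
```python
# Hypothetical driver loop -- `db`, `packages`, `integrator`, and `dt`
# are assumed names, not defined in the file above.
conserve = SpheralConservation(db, packages)
for cycle in xrange(1, 11):
    integrator.step(cycle*dt)                # advance the simulation somehow
    conserve.updateHistory(cycle, cycle*dt)  # append the conserved quantities
conserve.writeHistory("conservation.txt")    # gnuplot-friendly columns
```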
#### File: src/SimulationControl/SpheralGnuPlotUtilities.py
```python
import Gnuplot
import mpi
from Spheral import *
from math import *
import numpy
import os
from SpheralTestUtilities import multiSort
SpheralGnuPlotCache = []
from spheralDimensions import spheralDimensions
dims = spheralDimensions()
#-------------------------------------------------------------------------------
# Define a dummy Gnuplot class, so that non-master processes can silently
# and harmlessly accept Gnuplot commands.
#-------------------------------------------------------------------------------
class fakeGnuplot:
def __init__(self):
return
def __call__(self, *arghs, **keyw):
return
def plot(self, *arghs, **keyw):
return
def replot(self, *arghs, **keyw):
return
def refresh(self, *arghs, **keyw):
return
def xlabel(self, *arghs, **keyw):
return
def ylabel(self, *arghs, **keyw):
return
def title(self, *arghs, **keyw):
return
def hardcopy(self, *arghs, **keyw):
return
def generateNewGnuPlot(persist = False):
if mpi.rank == 0:
result = Gnuplot.Gnuplot(persist = persist)
if "GNUTERM" in os.environ.keys():
result("set term %s" % os.environ["GNUTERM"])
return result
else:
return fakeGnuplot()
#-------------------------------------------------------------------------------
# Since the default Gnuplot.py doesn't support png output, I'll add it here
# myself.
#-------------------------------------------------------------------------------
def pngFile(plot, filename,
color = 1,
fontSize = "medium"):
setLine = "set terminal png " + fontSize
if color:
setLine += " color"
if filename[-4:] != ".png":
filename += ".png"
plot(setLine)
plot.set_string("output", filename)
plot.refresh()
plot("set terminal x11")
plot.set_string("output")
return
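# Sketch of use (rank 0 only; `p` is assumed to be a live Gnuplot object):
#   p = generateNewGnuPlot()
#   p.plot(Gnuplot.Data([0.0, 1.0], [0.0, 1.0], inline = True))
#   pngFile(p, "example")   # writes example.png, then restores the x11 term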
#-------------------------------------------------------------------------------
# Calculate the radial velocity component, given a FieldList of positions
# and a FieldList of velocities.
#-------------------------------------------------------------------------------
def radialVelocityFieldList(positions,
velocities):
dim = type(positions).__name__[-2:]
radialVelocity = None
fieldConstructor = None
if dim == "1d":
radialVelocity = ScalarFieldList1d()
fieldConstructor = ScalarField1d
elif dim == "2d":
radialVelocity = ScalarFieldList2d()
fieldConstructor = ScalarField2d
elif dim == "3d":
radialVelocity = ScalarFieldList3d()
fieldConstructor = ScalarField3d
radialVelocity.copyFields()
for field in positions:
radialVelocity.appendField(fieldConstructor("radial velocity", field.nodeList()))
assert positions.numFields == velocities.numFields == radialVelocity.numFields
for fieldID in xrange(positions.numFields):
rfield = positions[fieldID]
vfield = velocities[fieldID]
vrfield = radialVelocity[fieldID]
assert rfield.numElements == vfield.numElements == vrfield.numElements
for nodeID in xrange(rfield.numElements):
r = rfield[nodeID]
v = vfield[nodeID]
runit = r.unitVector()
vrfield[nodeID] = v.dot(runit)
return radialVelocity
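# Sketch (assumes a DataBase `db`): the radial velocity FieldList is built as
#   vr = radialVelocityFieldList(db.fluidPosition, db.fluidVelocity)
# and plotted against radius in plotRadialState below.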
#-------------------------------------------------------------------------------
# Calculate the azimuthal velocity component, given a FieldList of positions
# and a FieldList of velocities.
#-------------------------------------------------------------------------------
def azimuthalVelocityFieldList(positions,
velocities):
dim = type(positions).__name__[-2:]
azimuthalVelocity = None
fieldConstructor = None
if dim == "1d":
azimuthalVelocity = ScalarFieldList1d()
fieldConstructor = ScalarField1d
elif dim == "2d":
azimuthalVelocity = ScalarFieldList2d()
fieldConstructor = ScalarField2d
elif dim == "3d":
azimuthalVelocity = ScalarFieldList3d()
fieldConstructor = ScalarField3d
azimuthalVelocity.copyFields()
for field in positions:
azimuthalVelocity.appendField(fieldConstructor("azimuthal velocity", field.nodeList()))
assert positions.numFields == velocities.numFields == azimuthalVelocity.numFields
for fieldID in xrange(positions.numFields):
rfield = positions[fieldID]
vfield = velocities[fieldID]
vafield = azimuthalVelocity[fieldID]
assert rfield.numElements == vfield.numElements == vafield.numElements
for nodeID in xrange(rfield.numElements):
r = rfield[nodeID]
v = vfield[nodeID]
raz = r.unitVector()
x = raz.x
y = raz.y
raz.x = -y
raz.y = x
vafield[nodeID] = v.dot(raz)
return azimuthalVelocity
#-------------------------------------------------------------------------------
# Helper method to determine the angular momentum per node.
#-------------------------------------------------------------------------------
def angularMomentum(mass, position, velocity):
assert mass.numFields == position.numFields == velocity.numFields
result = []
for massField, positionField, velocityField in zip(mass,
position,
velocity):
assert (massField.nodeList().numInternalNodes ==
positionField.nodeList().numInternalNodes ==
velocityField.nodeList().numInternalNodes)
for j in xrange(massField.nodeList().numInternalNodes):
result.append((positionField[j].cross(velocityField[j]))*massField[j])
return result
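# Sketch (assumes a DataBase `db`): per-node angular momenta as a Python list,
#   L = angularMomentum(db.globalMass, db.globalPosition, db.globalVelocity)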
#-------------------------------------------------------------------------------
# Plot a FieldList
#-------------------------------------------------------------------------------
def plotFieldList(fieldList,
xFunction = "%s.x",
yFunction = "%s",
plotGhosts = False,
colorNodeLists = False,
plot = None,
userXRange = [None, None],
userYRange = [None, None],
plotStyle = "lines",
lineStyle = "linetype -1 linewidth 1 pointtype 4 pointsize 1.0",
winTitle = None,
lineTitle = "",
xlabel = None,
ylabel = None,
filterFunc = None):
if plot is None:
plot = generateNewGnuPlot()
SpheralGnuPlotCache.append(plot)
def nullFilter(pos):
return True
if filterFunc is None:
filterFunc = nullFilter
# Gather the fieldList info across all processors to process 0.
globalNumNodes = []
globalX = []
globalY = []
for field in fieldList:
if plotGhosts:
xvals = field.nodeList().positions().allValues()
yvals = field.allValues()
else:
xvals = field.nodeList().positions().internalValues()
yvals = field.internalValues()
localX = []
localY = []
for x, y in zip(xvals, yvals):
if filterFunc(x):
localX.append(eval(xFunction % "x"))
localY.append(eval(yFunction % "y"))
n = len(localX)
if mpi:
globalNumNodes.append(mpi.allreduce(n, mpi.SUM))
globalX.extend(mpi.allreduce(localX, mpi.SUM))
globalY.extend(mpi.allreduce(localY, mpi.SUM))
else:
globalNumNodes.append(n)
globalX.extend(localX)
globalY.extend(localY)
if mpi.rank == 0:
# Find the total number of nodes.
totalNumNodes = sum(globalNumNodes)
assert(len(globalNumNodes) == fieldList.numFields)
assert(len(globalX) == totalNumNodes)
assert(len(globalY) == totalNumNodes)
        # Copy the input ranges.  The default list arguments are mutable and
        # shared between calls, so we must not modify them in place.
xRange = userXRange[:]
yRange = userYRange[:]
# Set the line style
## plot("set linestyle 1 " + lineStyle)
# Set the labels.
if winTitle: plot.title(winTitle)
if xlabel: plot.xlabel(xlabel)
if ylabel: plot.ylabel(ylabel)
# Set the ranges.
xmin = 1e30
xmax = -1e30
ymin = 1e30
ymax = -1e30
for x in globalX:
xmin = min(xmin, x)
xmax = max(xmax, x)
for y in globalY:
ymin = min(ymin, y)
ymax = max(ymax, y)
if xmin == xmax:
xmin = xmin - 0.5
xmax = xmax + 0.5
if ymin == ymax:
ymin = ymin - 0.5
ymax = ymax + 0.5
if xRange[0] == None: xRange[0] = xmin
if xRange[1] == None: xRange[1] = xmax
if yRange[0] == None: yRange[0] = ymin - 0.05*max(1e-5, ymax - ymin)
if yRange[1] == None: yRange[1] = ymax + 0.05*max(1e-5, ymax - ymin)
plot("set xrange [%f:%f]" % tuple(xRange))
plot("set yrange [%f:%f]" % tuple(yRange))
# Finally, loop over the fields and do the deed.
assert(len(globalX) == len(globalY))
if colorNodeLists:
legendNodeList = {}
for i in xrange(fieldList.numFields):
legendNodeList[i] = lineTitle + ": " + fieldList[i].nodeList().name
cumulativeNumNodes = 0
for fieldID in xrange(len(globalNumNodes)):
n = globalNumNodes[fieldID]
iNodeList = fieldID % fieldList.numFields
x = numpy.array(globalX[cumulativeNumNodes:
cumulativeNumNodes + n])
y = numpy.array(globalY[cumulativeNumNodes:
cumulativeNumNodes + n])
if n:
## plot("set linestyle %i lt %i pt %i" % (iNodeList + 1,
## iNodeList + 1,
## iNodeList + 1))
legend = legendNodeList[iNodeList]
legendNodeList[iNodeList] = None
data = Gnuplot.Data(x, y,
with_ = plotStyle + " lt %i" % iNodeList,
title = legend,
inline = True)
plot.replot(data)
SpheralGnuPlotCache.append(data)
cumulativeNumNodes += n
else:
x = numpy.array(globalX)
y = numpy.array(globalY)
data = Gnuplot.Data(x, y,
with_ = plotStyle + " lt -1 pt 3",
title = lineTitle,
inline = True)
plot.replot(data)
SpheralGnuPlotCache.append(data)
lineTitle = None
# That's it, return the Gnuplot object.
mpi.barrier()
return plot
#-------------------------------------------------------------------------------
# Plot the mass density, velocity, pressure, and smoothing scale for the fluid
# node lists in the given data base. Implicitly assuming 1-D.
#-------------------------------------------------------------------------------
def plotState(thingus,
plotGhosts = False,
colorNodeLists = False,
plotStyle = "points",
xFunction = "%s.x",
vecyFunction = "%s.x",
tenyFunction = "%s.xx ** -1",
lineTitle = "Simulation",
filterFunc = None):
dim = type(thingus).__name__[-2:]
if isinstance(thingus, eval("State%s" % dim)):
rho = thingus.scalarFields(HydroFieldNames.massDensity)
vel = thingus.vectorFields(HydroFieldNames.velocity)
eps = thingus.scalarFields(HydroFieldNames.specificThermalEnergy)
P = thingus.scalarFields(HydroFieldNames.pressure)
H = thingus.symTensorFields(HydroFieldNames.H)
else:
assert isinstance(thingus, eval("DataBase%s" % dim))
rho = thingus.fluidMassDensity
vel = thingus.fluidVelocity
eps = thingus.fluidSpecificThermalEnergy
P = thingus.newFluidScalarFieldList(0.0, "pressure")
thingus.fluidPressure(P)
H = thingus.fluidHfield
rhoPlot = plotFieldList(rho,
xFunction = xFunction,
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = plotStyle,
winTitle = "Mass Density",
lineTitle = lineTitle,
xlabel="x",
filterFunc = filterFunc)
velPlot = plotFieldList(vel,
xFunction = xFunction,
yFunction = vecyFunction,
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = plotStyle,
winTitle = "Velocity",
lineTitle = lineTitle,
xlabel="x",
filterFunc = filterFunc)
epsPlot = plotFieldList(eps,
xFunction = xFunction,
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = plotStyle,
winTitle = "Specific Thermal Energy",
lineTitle = lineTitle,
xlabel="x",
filterFunc = filterFunc)
PPlot = plotFieldList(P,
xFunction = xFunction,
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = plotStyle,
winTitle = "Pressure",
lineTitle = lineTitle,
xlabel="x",
filterFunc = filterFunc)
HPlot = plotFieldList(H,
xFunction = xFunction,
yFunction = tenyFunction,
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = plotStyle,
winTitle = "Smoothing scale",
lineTitle = lineTitle,
xlabel="x",
filterFunc = filterFunc)
return rhoPlot, velPlot, epsPlot, PPlot, HPlot
#-------------------------------------------------------------------------------
# Plot the state vs. radius
#-------------------------------------------------------------------------------
def plotRadialState(dataBase,
plotGhosts = False,
colorNodeLists = False,
lineTitle = "Simulation",
filterFunc = None):
rhoPlot = plotFieldList(dataBase.fluidMassDensity,
xFunction = "%s.magnitude()",
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = "points",
winTitle = "Mass density",
lineTitle = lineTitle,
xlabel = "r",
filterFunc = filterFunc)
radialVelocity = radialVelocityFieldList(dataBase.fluidPosition,
dataBase.fluidVelocity)
velPlot = plotFieldList(radialVelocity,
xFunction = "%s.magnitude()",
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = "points",
winTitle = " Radial Velocity",
lineTitle = lineTitle,
xlabel = "r",
filterFunc = filterFunc)
epsPlot = plotFieldList(dataBase.fluidSpecificThermalEnergy,
xFunction = "%s.magnitude()",
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = "points",
winTitle = "Specific Thermal Energy",
lineTitle = lineTitle,
xlabel = "r",
filterFunc = filterFunc)
fluidPressure = dataBase.newFluidScalarFieldList(0.0, "pressure")
dataBase.fluidPressure(fluidPressure)
PPlot = plotFieldList(fluidPressure,
xFunction = "%s.magnitude()",
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = "points",
winTitle = "Pressure",
lineTitle = lineTitle,
xlabel = "r",
filterFunc = filterFunc)
HPlot = plotFieldList(dataBase.fluidHfield,
xFunction = "%s.magnitude()",
yFunction = "%s.xx**-1",
plotGhosts = plotGhosts,
colorNodeLists = colorNodeLists,
plotStyle = "points",
winTitle = "Smoothing scale",
lineTitle = lineTitle,
xlabel = "r",
filterFunc = filterFunc)
return rhoPlot, velPlot, epsPlot, PPlot, HPlot
#-------------------------------------------------------------------------------
# Overplot the answer on results from plotState.
#-------------------------------------------------------------------------------
def plotAnswer(answerObject, time,
rhoPlot = None,
velPlot = None,
epsPlot = None,
PPlot = None,
APlot = None,
HPlot = None,
x = None):
try:
x, v, u, rho, P, h = answerObject.solution(time, x)
A = None
except:
try:
x, v, u, rho, P, A, h = answerObject.solution(time, x)
except:
x, v, u, rho, P = answerObject.solution(time, x)
A = None
h = None
if rhoPlot is not None:
data = Gnuplot.Data(x, rho,
with_="lines lt 7 lw 2",
title="Solution",
inline = True)
SpheralGnuPlotCache.append(data)
rhoPlot.replot(data)
if velPlot is not None:
data = Gnuplot.Data(x, v,
with_="lines lt 7 lw 2",
title="Solution",
inline = True)
SpheralGnuPlotCache.append(data)
velPlot.replot(data)
if epsPlot is not None:
data = Gnuplot.Data(x, u,
with_="lines lt 7 lw 2",
title="Solution",
inline = True)
SpheralGnuPlotCache.append(data)
epsPlot.replot(data)
if PPlot is not None:
data = Gnuplot.Data(x, P,
with_="lines lt 7 lw 2",
title="Solution",
inline = True)
SpheralGnuPlotCache.append(data)
PPlot.replot(data)
if APlot is not None and A:
data = Gnuplot.Data(x, A,
with_="lines lt 7 lw 2",
title="Solution",
inline = True)
SpheralGnuPlotCache.append(data)
APlot.replot(data)
if HPlot is not None:
data = Gnuplot.Data(x, h,
with_="lines lt 7 lw 2",
title="Solution",
inline = True)
SpheralGnuPlotCache.append(data)
HPlot.replot(data)
return
#-------------------------------------------------------------------------------
# Plot the node positions
#-------------------------------------------------------------------------------
def plotNodePositions2d(thingy,
xFunction = "%s.x",
yFunction = "%s.y",
plotGhosts = False,
colorNodeLists = True,
colorDomains = False,
title = "",
style = "points",
persist = None):
assert colorNodeLists + colorDomains <= 1
if isinstance(thingy, DataBase2d):
nodeLists = thingy.nodeLists()
else:
nodeLists = thingy
# Gather the node positions across all domains.
# Loop over all the NodeLists.
xNodes = []
yNodes = []
for nodeList in nodeLists:
if plotGhosts:
pos = nodeList.positions().allValues()
else:
pos = nodeList.positions().internalValues()
xNodes.append([eval(xFunction % "x") for x in pos])
yNodes.append([eval(yFunction % "x") for x in pos])
assert len(xNodes) == len(nodeLists)
assert len(xNodes) == len(yNodes)
globalXNodes = mpi.gather(xNodes)
globalYNodes = mpi.gather(yNodes)
if mpi.rank == 0:
assert len(globalXNodes) == mpi.procs
assert len(globalYNodes) == mpi.procs
xlist, ylist = [], []
if colorDomains:
for xDomain, yDomain in zip(globalXNodes, globalYNodes):
assert len(xDomain) == len(nodeLists)
assert len(yDomain) == len(nodeLists)
xlist.append([])
ylist.append([])
for xx in xDomain:
xlist[-1].extend(xx)
for yy in yDomain:
ylist[-1].extend(yy)
assert len(xlist) == mpi.procs
assert len(ylist) == mpi.procs
elif colorNodeLists:
for i in xrange(len(nodeLists)):
xlist.append([])
ylist.append([])
for xDomain, yDomain in zip(globalXNodes, globalYNodes):
assert len(xDomain) == len(nodeLists)
assert len(yDomain) == len(nodeLists)
for i in xrange(len(nodeLists)):
xlist[i].extend(xDomain[i])
ylist[i].extend(yDomain[i])
assert len(xlist) == len(nodeLists)
assert len(ylist) == len(nodeLists)
else:
xlist, ylist = [[]], [[]]
for xDomain, yDomain in zip(globalXNodes, globalYNodes):
assert len(xDomain) == len(nodeLists)
assert len(yDomain) == len(nodeLists)
for i in xrange(len(nodeLists)):
xlist[0].extend(xDomain[i])
ylist[0].extend(yDomain[i])
plot = generateNewGnuPlot(persist = persist)
plot("set size square")
plot.title = title
assert len(xlist) == len(ylist)
for x, y in zip(xlist, ylist):
data = Gnuplot.Data(x, y,
with_ = style,
inline = True)
plot.replot(data)
SpheralGnuPlotCache.append(data)
return plot
else:
return fakeGnuplot()
#-------------------------------------------------------------------------------
# Plot all the nodes in the given data base, and then color the control/ghost
# nodes of the given boundary condition independently.
#-------------------------------------------------------------------------------
def plotBoundaryNodes(dataBase, boundary):
# First build one set of position pairs for all of the nodes in the
# data base.
positions = []
for nodeList in dataBase.nodeLists():
for r in list(nodeList.positions())[:nodeList.numInternalNodes]:
positions.append((r.x, r.y))
# Now build a list of the control node positions from the boundary
# condition.
controlPositions = []
for nodeList in dataBase.nodeLists():
controlNodes = boundary.controlNodes(nodeList)
for nodeID in controlNodes:
r = nodeList.positions()[nodeID]
controlPositions.append((r.x, r.y))
# Now build a list of the ghost node positions from the boundary
# condition.
ghostPositions = []
for nodeList in dataBase.nodeLists():
ghostNodes = boundary.ghostNodes(nodeList)
for nodeID in ghostNodes:
r = nodeList.positions()[nodeID]
ghostPositions.append((r.x, r.y))
# Finally we can plot these various sets of nodes.
plot = plotXYTuples([positions, controlPositions, ghostPositions])
return plot
#-------------------------------------------------------------------------------
# Plot the given sequences of (x,y) pairs, each with a distinct color.
# [ [(x0,y0), (x1,y1), ...],
# [(x0,y0), (x1,y1), ...],
# .
# .
# .
# [(x0,y0), (x1,y1), ...] ]
#-------------------------------------------------------------------------------
def plotXYTuples(listOfXYTuples):
# Find the (min,max) of X and Y for all sets.
xmin, ymin, xmax, ymax = findPairMinMax(listOfXYTuples[0])
for seq in listOfXYTuples[1:]:
xmin0, ymin0, xmax0, ymax0 = findPairMinMax(seq)
xmin = min(xmin, xmin0)
ymin = min(ymin, ymin0)
xmax = max(xmax, xmax0)
ymax = max(ymax, ymax0)
# Create our plot result.
plot = generateNewGnuPlot()
plot("set size square")
# Loop over the list of sequences of positions.
icolor = 0
for seq in listOfXYTuples:
icolor += 1
# Build the local arrays of x and y.
x = numpy.array([0.0]*len(seq))
y = numpy.array([0.0]*len(seq))
for i in xrange(len(seq)):
x[i] = seq[i][0]
y[i] = seq[i][1]
# Build the gnuplot data.
data = Gnuplot.Data(x, y,
with_ = "points",
inline = True)
SpheralGnuPlotCache.append(data)
# Plot this set of data.
## plot("set linestyle %i lt %i pt 1" % (icolor, icolor))
plot.replot(data)
# That"s it, return the plot.
return plot
#-------------------------------------------------------------------------------
# Find the (min, max) of a set of pairs.
#-------------------------------------------------------------------------------
def findPairMinMax(listOfPairs):
minX, minY = 1e90, 1e90
maxX, maxY = -1e90, -1e90
for pair in listOfPairs:
minX = min(minX, pair[0])
minY = min(minY, pair[1])
maxX = max(maxX, pair[0])
maxY = max(maxY, pair[1])
return minX, minY, maxX, maxY
#-------------------------------------------------------------------------------
# Plot the velocity field as a set of arrows.
# This is maintained here for backward compatibility, as a specialization of
# plotVectorField2d.
#-------------------------------------------------------------------------------
def plotVelocityField2d(dataBase,
plotGhosts = False,
velMultiplier = 1.0,
colorNodeLists = False,
colorDomains = False,
title = ""):
return plotVectorField2d(dataBase,
dataBase.globalVelocity,
plotGhosts,
velMultiplier,
colorNodeLists,
colorDomains,
title)
#-------------------------------------------------------------------------------
# Plot the node spacing in 1D.
#-------------------------------------------------------------------------------
def plotNodeSpacing1d(dataBase):
pos = dataBase.globalPosition
xvals = []
for ifield in xrange(len(pos)):
xvals += [pos[ifield][i].x for i in xrange(pos[ifield].numInternalElements)]
xvals = mpi.allreduce(xvals, mpi.SUM)
xvals.sort()
deltas = [xvals[i+1] - xvals[i] for i in xrange(len(xvals) - 1)] + [xvals[-1] - xvals[-2]]
plot = generateNewGnuPlot()
d = Gnuplot.Data(xvals, deltas, with_="lines")
plot.plot(d)
return plot
#-------------------------------------------------------------------------------
# Plot an arbitrary vector field as a set of arrows.
#-------------------------------------------------------------------------------
def plotVectorField2d(dataBase, fieldList,
plotGhosts = False,
vectorMultiplier = 1.0,
colorNodeLists = False,
colorDomains = False,
title = ""):
assert colorNodeLists + colorDomains <= 1
# Gather the node positions and vectors across all domains.
# Loop over all the NodeLists.
localNumNodes = []
xNodes = []
yNodes = []
vxNodes = []
vyNodes = []
for i in xrange(dataBase.numNodeLists):
nodeList = dataBase.nodeLists()[i]
assert i < fieldList.numFields
vectorField = fieldList[i]
if plotGhosts:
n = nodeList.numNodes
else:
n = nodeList.numInternalNodes
localNumNodes.append(n)
xNodes += numpy.array(map(lambda x: x.x, list(nodeList.positions())[:n]))
yNodes += numpy.array(map(lambda x: x.y, list(nodeList.positions())[:n]))
vxNodes += numpy.array(map(lambda x: x.x, list(vectorField)[:n]))*vectorMultiplier
vyNodes += numpy.array(map(lambda x: x.y, list(vectorField)[:n]))*vectorMultiplier
assert len(xNodes) == len(yNodes) == len(vxNodes) == len(vyNodes)
numDomainNodes = [len(xNodes)]
numNodesPerDomain = mpi.gather(numDomainNodes)
globalNumNodes = mpi.gather(localNumNodes)
globalXNodes = mpi.gather(xNodes)
globalYNodes = mpi.gather(yNodes)
globalVxNodes = mpi.gather(vxNodes)
globalVyNodes = mpi.gather(vyNodes)
if mpi.rank == 0:
plot = generateNewGnuPlot()
plot("set size square")
plot.title = title
if colorDomains:
cumulativeN = 0
for domain in xrange(len(numNodesPerDomain)):
n = numNodesPerDomain[domain]
x = numpy.array(globalXNodes[cumulativeN:cumulativeN + n])
y = numpy.array(globalYNodes[cumulativeN:cumulativeN + n])
vx = numpy.array(globalVxNodes[cumulativeN:cumulativeN + n])
vy = numpy.array(globalVyNodes[cumulativeN:cumulativeN + n])
cumulativeN += n
## plot("set linestyle %i lt %i pt %i" % (domain + 1,
## domain + 1,
## domain + 1))
data = Gnuplot.Data(x, y, vx, vy,
with_ = "vector ls %i" % (domain + 1),
inline = True)
plot.replot(data)
SpheralGnuPlotCache.append(data)
elif colorNodeLists:
cumulativeN = 0
for i in xrange(len(globalNumNodes)):
n = globalNumNodes[i]
if n > 0:
iNodeList = i % dataBase.numNodeLists
x = numpy.array(globalXNodes[cumulativeN:cumulativeN + n])
y = numpy.array(globalYNodes[cumulativeN:cumulativeN + n])
vx = numpy.array(globalVxNodes[cumulativeN:cumulativeN + n])
vy = numpy.array(globalVyNodes[cumulativeN:cumulativeN + n])
cumulativeN += n
## plot("set linestyle %i lt %i pt %i" % (iNodeList + 1,
## iNodeList + 1,
## iNodeList + 1))
data = Gnuplot.Data(x, y, vx, vy,
with_ = "vector ls %i" % (iNodeList + 1),
inline = True)
plot.replot(data)
SpheralGnuPlotCache.append(data)
else:
x = numpy.array(globalXNodes)
y = numpy.array(globalYNodes)
vx = numpy.array(globalVxNodes)
vy = numpy.array(globalVyNodes)
data = Gnuplot.Data(x, y, vx, vy,
with_ = "vector",
inline = True)
plot.replot(data)
SpheralGnuPlotCache.append(data)
return plot
else:
        return fakeGnuplot()
#-------------------------------------------------------------------------------
# Generate a regularly spaced sampling of the given FieldList
# The answer is returned in a 2-D numpy array.
#-------------------------------------------------------------------------------
def gridSample(fieldList,
zFunction = "%s",
nx = 100,
ny = 100,
xmin = None,
xmax = None,
ymin = None,
ymax = None):
assert nx > 0 and ny > 0
# Set up our return value array.
xValues = numpy.array([[0.0]*nx]*ny)
yValues = numpy.array([[0.0]*nx]*ny)
zValues = numpy.array([[0.0]*nx]*ny)
# Gather the fieldList info across all processors to process 0.
localNumNodes = []
localX = []
localY = []
for ifield in xrange(fieldList.numFields):
field = fieldList[ifield]
n = field.nodeList().numNodes
localNumNodes.append(n)
for r in field.nodeList().positions():
localX.append(r.x)
localY.append(r.y)
globalNumNodes = mpi.gather(localNumNodes)
globalX = mpi.gather(localX)
globalY = mpi.gather(localY)
# If the user did not specify the sampling volume, then find the min and
# max node positions.
if xmin == None:
xmin = min(localX)
if ymin == None:
ymin = min(localY)
if xmax == None:
xmax = max(localX)
if ymax == None:
ymax = max(localY)
xmin = mpi.allreduce(xmin, mpi.MIN)
ymin = mpi.allreduce(ymin, mpi.MIN)
xmax = mpi.allreduce(xmax, mpi.MAX)
ymax = mpi.allreduce(ymax, mpi.MAX)
assert xmax > xmin
assert ymax > ymin
# Figure out the sizes of the bins we're going to be sampling in
dx = (xmax - xmin)/nx
dy = (ymax - ymin)/ny
    # Loop over all the grid sampling positions, and figure out this
    # processor's contribution.
for iy in xrange(ny):
for ix in xrange(nx):
xValues[iy][ix] = xmin + (ix + 0.5)*dx
yValues[iy][ix] = ymin + (iy + 0.5)*dy
r = Vector2d(xValues[iy][ix], yValues[iy][ix])
z = fieldList.sample(r)
localZ = eval(zFunction % "z")
globalZ = mpi.reduce(localZ, mpi.SUM)
if mpi.rank == 0:
print "%i %i %i %s %g %g" % (mpi.rank, ix, iy, r, z, localZ)
print "%i %g" % (mpi.rank, globalZ)
zValues[iy][ix] = globalZ
return xValues, yValues, zValues
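# Sketch (assumes a 2-D scalar FieldList `rho`): sample onto a 64x64 lattice,
#   xg, yg, zg = gridSample(rho, nx = 64, ny = 64)
# zFunction can transform each sample, e.g. zFunction = "%s.magnitude()".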
#-------------------------------------------------------------------------------
# Plot the energy history of the given conservation object.
#-------------------------------------------------------------------------------
def plotEHistory(conserve):
if mpi.rank == 0:
t = conserve.timeHistory
E = conserve.EHistory
KE = conserve.KEHistory
TE = conserve.TEHistory
UE = conserve.EEHistory
Edata = Gnuplot.Data(t, E,
with_ = "lines",
title = "Total Energy",
inline = True)
KEdata = Gnuplot.Data(t, KE,
with_ = "lines",
title = "Kinetic Energy",
inline = True)
TEdata = Gnuplot.Data(t, TE,
with_ = "lines",
title = "Thermal Energy",
inline = True)
UEdata = Gnuplot.Data(t, UE,
with_ = "lines",
title = "Potential Energy",
inline = True)
plot = generateNewGnuPlot()
plot.replot(Edata)
plot.replot(KEdata)
plot.replot(TEdata)
plot.replot(UEdata)
plot.replot()
SpheralGnuPlotCache.extend([Edata, KEdata, TEdata, UEdata])
return plot
else:
return fakeGnuplot()
#-------------------------------------------------------------------------------
# Plot the linear momentum history of the given conservation object.
#-------------------------------------------------------------------------------
def plotpmomHistory(conserve):
if mpi.rank == 0:
t = conserve.timeHistory
p = conserve.pmomHistory
px = [x.x for x in p]
py = [x.y for x in p]
pz = [x.z for x in p]
pmag = [x.magnitude() for x in p]
pxdata = Gnuplot.Data(t, px,
with_ = "lines",
title = "x momentum",
inline = True)
pydata = Gnuplot.Data(t, py,
with_ = "lines",
title = "y momentum ",
inline = True)
pzdata = Gnuplot.Data(t, pz,
with_ = "lines",
title = "z momentum",
inline = True)
pmagdata = Gnuplot.Data(t, pmag,
with_ = "lines",
title = "total momentum",
inline = True)
plot = generateNewGnuPlot()
plot.replot(pxdata)
plot.replot(pydata)
plot.replot(pzdata)
plot.replot(pmagdata)
plot.replot()
SpheralGnuPlotCache.extend([pxdata, pydata, pzdata, pmagdata])
return plot
else:
return fakeGnuplot()
#-------------------------------------------------------------------------------
# Plot a polygon.
#-------------------------------------------------------------------------------
def plotPolygon(polygon,
plotVertices = True,
plotFacets = True,
plotNormals = False,
plotCentroid = False,
plot = None,
persist = False,
plotLabels = True):
px = []
py = []
for v in polygon.vertices:
px.append(v.x)
py.append(v.y)
fx = []
fy = []
fdx = []
fdy = []
nx = []
ny = []
ndx = []
ndy = []
for f in polygon.facets:
dr = f.point2 - f.point1
hdr = dr/2.0
fx.append(f.point1.x)
fy.append(f.point1.y)
fdx.append(dr.x)
fdy.append(dr.y)
nx.append(fx[-1] + hdr.x)
ny.append(fy[-1] + hdr.y)
ndx.append(f.normal.x)
ndy.append(f.normal.y)
if plot is None:
plot = generateNewGnuPlot(persist)
if plotLabels:
vlabel, flabel, nlabel = "Vertices", "Facets", "Normals"
else:
vlabel, flabel, nlabel = None, None, None
dataPoints = Gnuplot.Data(px, py,
with_ = "points pt 1 ps 2",
title = vlabel,
inline = True)
dataFacets = Gnuplot.Data(fx, fy, fdx, fdy,
with_ = "vectors",
title = flabel,
inline = True)
dataNormals = Gnuplot.Data(nx, ny, ndx, ndy,
with_ = "vectors",
title = nlabel,
inline = True)
if plotVertices:
plot.replot(dataPoints)
if plotFacets:
plot.replot(dataFacets)
if plotNormals:
plot.replot(dataNormals)
if plotCentroid:
c = polygon.centroid
dataCentroid = Gnuplot.Data([c.x], [c.y],
with_ = "points pt 2 ps 2",
title = "Centroid",
inline = True)
plot.replot(dataCentroid)
SpheralGnuPlotCache.extend([dataPoints, dataFacets, dataNormals, plot])
return plot
#-------------------------------------------------------------------------------
# Plot a PolygonalMesh
#-------------------------------------------------------------------------------
def plotPolygonalMesh(mesh,
persist = False):
polylocal = []
for izone in xrange(mesh.numZones):
zone = mesh.zone(izone)
polylocal.append([mesh.node(i).position() for i in zone.nodeIDs])
polylocal[-1].append(polylocal[-1][0])
assert len(polylocal) == mesh.numZones
p = generateNewGnuPlot(persist)
for sendProc in xrange(mpi.procs):
polys = mpi.bcast(polylocal, root=sendProc)
for poly in polys:
p.replot(Gnuplot.Data([x.x for x in poly], [x.y for x in poly],
with_ = "lines lt %i lw 2" % 1,
title = None,
inline = True))
return p
## edges0 = [(mesh.node(mesh.edge(i).node1ID).position(), mesh.node(mesh.edge(i).node2ID).position())
## for i in xrange(mesh.numEdges)]
## p = generateNewGnuPlot()
## datas = []
## for sendProc in xrange(mpi.procs):
## edges = mpi.bcast(edges0, root=sendProc)
## for edge in edges:
## datas.append(Gnuplot.Data([edge[0].x, edge[1].x], [edge[0].y, edge[1].y],
## with_ = "lines %s" % linetype,
## title = None,
## inline = True))
## p.replot(datas[-1])
## p.datas = datas
## return p
```
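A minimal sketch tying the plotting utilities above together, assuming a 1-D DataBase `db` and the `conserve` history object from the previous file (all names illustrative):
```python
# Hypothetical plotting snippet; `db` and `conserve` are assumed to exist.
from SpheralGnuPlotUtilities import plotState, plotEHistory, pngFile
rhoPlot, velPlot, epsPlot, PPlot, HPlot = plotState(db)  # five field plots
ePlot = plotEHistory(conserve)                           # energy vs. time
pngFile(rhoPlot, "rho")                                  # writes rho.png
```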
#### File: src/SimulationControl/SpheralHadesDump.py
```python
import Spheral
from SpheralCompiledPackages import silo
from writeSiloQuadMesh import writeSiloQuadMesh
import mpi
import sys, os, struct, time, bisect
from operator import mul
#-------------------------------------------------------------------------------
# Write a silo file resampling to a fixed cartesian mesh for the density.
#-------------------------------------------------------------------------------
def hadesDump(integrator,
nsample,
xmin,
xmax,
W,
baseFileName,
baseDirectory = ".",
procDirBaseName = "domains",
mask = None,
materials = None):
    # Currently supports only 2D and 3D.
db = integrator.dataBase
if db.nDim == 2:
import Spheral2d as sph
elif db.nDim == 3:
import Spheral3d as sph
else:
raise RuntimeError, "hadesDump ERROR: must be 2D or 3D"
# Prepare to time how long this takes.
t0 = time.clock()
# Get the set of material names we're going to write.
if materials is None:
materials = list(db.fluidNodeLists())
# HACK! We are currently restricting to writing single material output!
assert len(materials) == 1
# Make sure the output directory exists.
if mpi.rank == 0 and not os.path.exists(baseDirectory):
try:
os.makedirs(baseDirectory)
except:
raise RuntimeError, "Cannot create output directory %s" % baseDirectory
mpi.barrier()
# Sample the density.
ntot = reduce(mul, nsample)
for nodes in materials:
print "hadesDump: sampling density for %s..." % nodes.name
r = sph.VectorFieldList()
H = sph.SymTensorFieldList()
rho = sph.ScalarFieldList()
r.appendField(nodes.positions())
H.appendField(nodes.Hfield())
rho.appendField(nodes.massDensity())
mf = nodes.mass()
rhof = nodes.massDensity()
wf = sph.ScalarField("volume", nodes)
for i in xrange(nodes.numNodes):
wf[i] = mf[i]/max(1e-100, rhof[i])
w = sph.ScalarFieldList()
w.copyFields()
w.appendField(wf)
#w.appendField(sph.ScalarField("weight", nodes, 1.0))
fieldListSet = sph.FieldListSet()
fieldListSet.ScalarFieldLists.append(rho)
localMask = sph.IntFieldList()
if mask is None:
localMask.copyFields()
localMask.appendField(sph.IntField("mask", nodes, 1))
else:
localMask.appendField(mask.fieldForNodeList(nodes))
scalar_samples = sph.vector_of_vector_of_double()
vector_samples = sph.vector_of_vector_of_Vector()
tensor_samples = sph.vector_of_vector_of_Tensor()
symTensor_samples = sph.vector_of_vector_of_SymTensor()
(scalar_samples,
vector_samples,
tensor_samples,
symTensor_samples) = sph.sampleMultipleFields2Lattice(fieldListSet,
r, w, H, localMask,
W,
xmin, xmax,
sph.vector_of_int(nsample))
print "Generated %i scalar fields" % len(scalar_samples)
# Write out the silo info
writeSiloQuadMesh(scalar_data = scalar_samples,
ndim = db.nDim,
xmin = xmin,
xmax = xmax,
nglobal = nsample,
filename = baseFileName,
dirname = baseDirectory,
scalar_names = ("den",),
materials = materials,
time = integrator.currentTime,
cycle = integrator.currentCycle,
                      RZ = (Spheral.GeometryRegistrar.coords() == Spheral.CoordinateType.RZ))
mpi.barrier()
print "hadesDump finished: required %0.2f seconds" % (time.clock() - t0)
return
```
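A sketch of a `hadesDump` call, assuming a fully constructed 3-D `integrator` and table kernel `WT`; the bounds, resolution, and the `Vector` import are illustrative assumptions:
```python
# Hypothetical call; `integrator` and `WT` are assumed names.
from Spheral3d import Vector
from SpheralHadesDump import hadesDump
hadesDump(integrator,
          nsample = (64, 64, 64),
          xmin = Vector(0.0, 0.0, 0.0),
          xmax = Vector(1.0, 1.0, 1.0),
          W = WT,
          baseFileName = "hades_den")
```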
#### File: src/SimulationControl/SpheralPointmeshSiloDump.py
```python
from SpheralCompiledPackages import *
import os, time, mpi
from siloPointmeshDump import siloPointmeshDump
#-------------------------------------------------------------------------------
# Dump out all the Fields in a State object.
# You can pass any of the following for stateThingy:
# Integrator
# State
#-------------------------------------------------------------------------------
def dumpPhysicsState(stateThingy,
baseFileName,
baseDirectory = ".",
fields = None,
fieldLists = None,
currentTime = None,
currentCycle = None,
dumpGhosts = False,
dumpDerivatives = False,
boundaries = None):
# What did we get passed?
t0 = time.time()
dim = type(stateThingy).__name__[-2:]
if isinstance(stateThingy, eval("Integrator%s" % dim)):
integrator = stateThingy
dataBase = integrator.dataBase
state = eval("State%id(dataBase, integrator.physicsPackages())" % dataBase.nDim)
for p in integrator.physicsPackages():
p.registerAdditionalVisualizationState(dataBase, state)
derivs = eval("StateDerivatives%id(dataBase, integrator.physicsPackages())" % dataBase.nDim)
if dumpGhosts:
integrator.setGhostNodes()
integrator.applyGhostBoundaries(state, derivs)
integrator.finalizeGhostBoundaries()
currentTime = integrator.currentTime
currentCycle = integrator.currentCycle
elif isinstance(stateThingy, eval("State%s" % dim)):
integrator = None
state = stateThingy
derivs = None
assert currentTime is not None
assert currentCycle is not None
dataBase = eval("DataBase%s()" % dim)
assert state.fieldNameRegistered(HydroFieldNames.mass)
mass = state.scalarFields(HydroFieldNames.mass)
for nodes in mass.nodeListPtrs():
dataBase.appendNodeList(nodes)
assert state is not None
assert dataBase is not None
# Did the user specify any data to be dumped?
if not fields:
fields = []
if not fieldLists:
fieldLists = []
# Build up the list of fields in the state object.
fields += [x for x in state.allIntFields()]
fields += [x for x in state.allScalarFields()]
fields += [x for x in state.allVectorFields()]
fields += [x for x in state.allTensorFields()]
fields += [x for x in state.allSymTensorFields()]
# Are we also dumping the derivative fields?
if not derivs is None:
fields += [x for x in derivs.allIntFields()]
fields += [x for x in derivs.allScalarFields()]
fields += [x for x in derivs.allVectorFields()]
fields += [x for x in derivs.allTensorFields()]
fields += [x for x in derivs.allSymTensorFields()]
# If available, add the work, H inverse and hmin, hmax, & hmin_hmax_ratio by default.
if dataBase:
work = dataBase.globalWork
fieldLists.append(work)
Hfl = dataBase.fluidHfield
Hi = dataBase.newGlobalSymTensorFieldList()
dataBase.fluidHinverse(Hi)
fieldLists.append(Hi)
hmin = dataBase.newGlobalScalarFieldList()
hmax = dataBase.newGlobalScalarFieldList()
hminhmax = dataBase.newGlobalScalarFieldList()
for H, fmin, fmax, fratio in zip(Hfl,
hmin,
hmax,
hminhmax):
fmin.name = "hmin"
fmax.name = "hmax"
fratio.name = "hmin_hmax_ratio"
if dumpGhosts:
n = H.nodeList().numNodes
else:
n = H.nodeList().numInternalNodes
for i in xrange(n):
ev = H[i].eigenValues()
fmin[i] = 1.0/ev.maxElement()
fmax[i] = 1.0/max(1e-30, ev.minElement())
fratio[i] = ev.minElement()/max(1e-30, ev.maxElement())
fieldLists.append(hmin)
fieldLists.append(hmax)
fieldLists.append(hminhmax)
# Add a domain decomposition tag (if we're parallel).
try:
import mpi
domains = dataBase.newGlobalScalarFieldList()
for f in domains:
f.name = "Domains"
if dumpGhosts:
n = f.nodeList().numNodes
else:
n = f.nodeList().numInternalNodes
for i in xrange(n):
f[i] = mpi.rank
fieldLists.append(domains)
except:
pass
# Dump the sucker.
t1 = time.time()
fullBaseName = baseFileName + "-time=%g-cycle=%i" % (currentTime, currentCycle)
siloPointmeshDump(baseName = fullBaseName,
baseDirectory = baseDirectory,
fields = fields,
fieldLists = fieldLists,
time = currentTime,
cycle = currentCycle,
dumpGhosts = dumpGhosts)
# Add to the master file.
if mpi.rank == 0:
masterFileName = os.path.join(baseDirectory, baseFileName + ".visit")
mf = open(masterFileName, "a")
mf.write("%s\n" % (fullBaseName + ".silo"))
mf.close()
t2 = time.time()
print "SpheralPointMeshSiloDump: spent %g seconds on preliminaries, %g writing files." % (t1 - t0, t2 - t1)
return
```
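A sketch of dumping visualization state from a driver script; the `integrator` name is an assumption:
```python
# Hypothetical call; `integrator` is an assumed, fully constructed Integrator.
from SpheralPointmeshSiloDump import dumpPhysicsState
dumpPhysicsState(integrator,
                 baseFileName = "ExpandingTube",
                 baseDirectory = "visit",
                 dumpGhosts = False)
```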
#### File: Damage/ExpandingTube/bevelTubeEntrance.py
```python
from math import *
from SpheralTestUtilities import *
#-------------------------------------------------------------------------------
# A method to adjust the node positions and masses of the cylinder to match the
# beveling used in the experiments.
#
# A subtlety here is that in 2-D we assume that the tube is aligned along the x
# axis, but in 3-D along the z. :(
#-------------------------------------------------------------------------------
def bevelTubeEntrance(tubeNodeList,
nDim, # number of dimensions (2,3)
openingAngle, # radians
tubeInnerRadius, # length units
tubeThickness, # length units
xBevelBegin): # length units
# Pre-conditions.
assert nDim == 2 or nDim == 3
assert openingAngle >= 0.0
assert tubeInnerRadius >= 0.0
assert tubeThickness > 0.0
# Pre-compute some useful geometry.
tantheta = tan(openingAngle)
tubeOuterRadius = tubeInnerRadius + tubeThickness
vol0 = -1.0
if nDim == 2:
vol0 = tubeThickness
else:
vol0 = pi*(tubeOuterRadius**2 - tubeInnerRadius**2)
assert vol0 > 0.0
# Get the position and mass fields.
position = tubeNodeList.positions()
mass = tubeNodeList.mass()
# Iterate over the nodes in the node list.
numNodesBeveled = 0
for i in xrange(tubeNodeList.numInternalNodes):
# Is this node in the x-range to be beveled?
xi = position[i].x
yi = position[i].y
zi = 0.0
if nDim == 3:
xi = position[i].z
zi = position[i].x
if xi > xBevelBegin:
numNodesBeveled += 1
# Adjust the position.
dThickness = tantheta * (xi - xBevelBegin)
assert dThickness >= 0.0 and dThickness < tubeThickness
rmult = 1.0 - dThickness/tubeThickness
assert rmult > 0.0 and rmult <= 1.0
if nDim == 2:
assert distinctlyGreaterThan(yi, 0.0)
drFromOuterRadius = rmult*(tubeOuterRadius - yi)
assert drFromOuterRadius >= 0.0 and drFromOuterRadius <= tubeOuterRadius
dr = tubeOuterRadius - drFromOuterRadius
A = dr/yi
assert A >= 1.0
position[i].y *= A
assert position[i].y >= yi and position[i].y <= tubeOuterRadius
else:
drold = sqrt(yi*yi + zi*zi)
assert distinctlyGreaterThan(drold, 0.0)
drFromOuterRadius = rmult*(tubeOuterRadius - drold)
assert drFromOuterRadius >= 0.0 and drFromOuterRadius <= tubeOuterRadius
dr = tubeOuterRadius - drFromOuterRadius
A = dr/drold
assert A >= 1.0
position[i].x *= A
position[i].y *= A
drnew = sqrt(position[i].x**2 + position[i].y**2)
assert drnew >= drold and drnew <= tubeOuterRadius
# Adjust the node mass.
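            # The bevel removes a fraction dm = dvol/vol0 of the original wall
            # cross-section (2-D) or annular volume (3-D), so the node mass is
            # scaled by the surviving fraction below.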
dvol = -1.0
if nDim == 2:
dvol = dThickness
else:
dvol = pi*((tubeInnerRadius + dThickness)**2 - tubeInnerRadius**2)
assert dvol >= 0.0
dm = dvol/vol0
mmult = 1.0 - dm
mass[i] *= mmult
return numNodesBeveled
```
#### File: Damage/ExpandingTube/ExpandingTube-rz.py
```python
from Numeric import *
from SolidSpheral import *
from SpheralOptionParser import commandLine
from SpheralTestUtilities import *
from SpheralGnuPlotUtilities import *
from SpheralController import *
from findLastRestart import *
from SpheralVisitDump import dumpPhysicsState
from math import *
from bevelTubeEntrance import *
import sys
sys.path.append("../Utilities")
from NodeHistory import NodeHistory
import mpi
#-------------------------------------------------------------------------------
# Identify ourselves!
#-------------------------------------------------------------------------------
title("RZ expanding tube impact strength/damage model test")
#-------------------------------------------------------------------------------
# Generic problem parameters
# All CGS units.
#-------------------------------------------------------------------------------
commandLine(
SolidNodeListConstructor = SphSolidNodeList3d,
# Geometry
tubeThickness = 0.3,
rtubeInner = 0.5*1.27,
ltube = 5.08,
lCuAnvil = 1.0,
lFoamAnvil = 1.0,
lSteelAnvil = 1.0,
# Numbers of nodes.
nrtube = 20,
nltube = 338,
nrAnvil = 100,
nrSteelAnvilCap = 5,
# VISAR sampling parameters.
dxVISAR = 0.04,
dyVISAR = 0.04,
VISARsampleFrequency = 10,
    # Initial velocity of the projectile.
vxproj = -1.92e5,
# Parameters for the damage model.
DamageModelConstructor = GradyKippTensorDamage3d,
kWeibullSteelFactor = 1.0,
mWeibullSteelFactor = 1.0,
randomSeedSteel = 109482993,
strainType = TensorDamageModel3d.TensorStrainAlgorithm.StrainHistory,
# Node seeding stuff.
nPerh = 2.01,
# Material specific bounds on the mass density.
etaMinSteel = 0.6,
etaMaxSteel = 1.4,
etaMinLexan = 0.5,
etaMaxLexan = 1.5,
etaMinCu = 0.5,
etaMaxCu = 1.5,
etaMinFoam = 0.5,
etaMaxFoam = 1.5,
# Hydro parameters.
Qconstructor = TensorMonaghanGingoldViscosity3d,
Cl = 1.0,
Cq = 1.0,
Qlimiter = True,
balsaraCorrection = False,
epsilon2 = 1e-2,
hmin = 1.0e-5,
hmax = 0.5,
hminratio = 0.1,
cfl = 0.5,
XSPH = True,
epsilonTensile = 0.0,
nTensile = 4,
HEvolution = Hydro3d.HEvolutionType.IdealH,
sumForMassDensity = Hydro3d.MassDensityType.IntegrateDensity,
compatibleEnergyEvolution = True,
# Times, and simulation control.
goalTime = 50.0e-6,
dtSample = 50.0e-6 / 200.0,
dt = 1e-10,
dtMin = 1e-10,
dtMax = 1e-3,
dtGrowth = 2.0,
maxSteps = 200,
statsStep = 10,
redistributeStep = None,
smoothIters = 0,
# Restart and output files.
restoreCycle = None,
restartStep = 200,
baseDir = "dumps-expandingTube-rz",
)
# Derived geometry.
rtubeOuter = rtubeInner + tubeThickness
rplug, lplug = rtubeInner, 0.5*ltube
rproj, lproj = rplug, lplug
rAnvil = 2.0*rtubeOuter
lAnvil = lCuAnvil + lFoamAnvil + lSteelAnvil
# Use the above geometry to define enclosing points of the materials for the
# node generators.
xminTube = (lAnvil, rtubeInner)
xmaxTube = (lAnvil + ltube, rtubeOuter)
xminPlug = (lAnvil, 0.0)
xmaxPlug = (lAnvil + lplug, rplug)
xminProj = (lAnvil + ltube, 0.0)
xmaxProj = (lAnvil + ltube + lproj, rproj)
xminSteelAnvil = (0.0, 0.0)
xmaxSteelAnvil = (lSteelAnvil, rAnvil)
xminFoamAnvil = (lSteelAnvil, 0.0)
xmaxFoamAnvil = (lSteelAnvil + lFoamAnvil, rAnvil)
xminCuAnvil = (lSteelAnvil + lFoamAnvil, 0.0)
xmaxCuAnvil = (lAnvil, rAnvil)
xminSteelAnvilCap = (0.0, rAnvil)
xmaxSteelAnvilCap = (lAnvil, rAnvil + float(nrSteelAnvilCap)/float(nrAnvil)*rAnvil)
# The geometry of the bevel on the inner tube opening surface.
tubeOpeningAngle = 1.8 * pi/180.0 # radians
xBevelBegin = lAnvil + ltube - 0.6
# Define the VISAR sampling points.
xVISARa = lAnvil + 2.5
xVISARb = lAnvil + 2.0
xVISARc = lAnvil + 1.5
yVISARa = rtubeOuter
yVISARb = rtubeOuter
yVISARc = rtubeOuter
# Derived numbers of nodes.
nrplug, nlplug = int(rplug/lplug * nltube/4), nltube/4
nrproj, nlproj = nrplug, nlplug
nlAnvil = int(nrAnvil * lAnvil/rAnvil)
nlSteelAnvil = int(nlAnvil * 0.5*lSteelAnvil/lAnvil + 0.5)
nlFoamAnvil = int(nlAnvil * 0.25*lFoamAnvil/lAnvil + 0.5)
nlCuAnvil = int(nlAnvil * 0.5*lCuAnvil/lAnvil + 0.5)
nrSteelAnvil = int(nlSteelAnvil * rAnvil/lSteelAnvil + 0.5)
nrFoamAnvil = int(nlFoamAnvil * rAnvil/lFoamAnvil + 0.5)
nrCuAnvil = int(nlCuAnvil * rAnvil/lCuAnvil + 0.5)
nlSteelAnvilCap = nlSteelAnvil + nlFoamAnvil + nlCuAnvil
# Mass densities.
rho0Steel = 7.85
rho0Lexan = 1.196
rho0Air = 0.001205
rho0Foam = 1.046
rho0Cu = 8.93
# Initial velocity of the projectile.
v0proj = Vector3d(vxproj, 0.0, 0.0)
# Damage model parameters.
kWeibullSteel = 8.8e4 * kWeibullSteelFactor
mWeibullSteel = 2.63 * mWeibullSteelFactor
volumeSteel = pi*(rtubeOuter**2 - rtubeInner**2)*ltube
# Restart and output files.
dataDir = "%s/%s/%s/k=%4.2f_m=%4.2f" % (baseDir,
str(DamageModelConstructor).split("'")[1],
str(SolidNodeListConstructor).split("'")[1],
kWeibullSteel,
mWeibullSteel)
restartDir = dataDir + "/restarts/proc-%04i" % mpi.rank
visitDir = dataDir + "/visit"
restartBaseName = restartDir + "/ExpandingTube"
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys
if mpi.rank == 0:
if not os.path.exists(dataDir):
os.makedirs(dataDir)
if not os.path.exists(visitDir):
os.makedirs(visitDir)
if not os.path.exists(restartDir):
os.makedirs(restartDir)
mpi.barrier()
if not os.path.exists(restartDir):
os.makedirs(restartDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# If we're restarting, find the set of most recent restart files.
#-------------------------------------------------------------------------------
if restoreCycle is None:
restoreCycle = findLastRestart(restartBaseName)
#-------------------------------------------------------------------------------
# Stainless steel material properties.
#-------------------------------------------------------------------------------
eosSteel = LinearPolynomialEquationOfStateCGS3d(rho0Steel, # reference density
etaMinSteel, # etamin
etaMaxSteel, # etamax
0.0, # A0
1.649901e12, # A1
1.674656e12, # A2
0.832543e12, # A3
1.93, # B1
0.5, # B2
0.0, # B3
55.350) # atomic weight
coldFitSteel = NinthOrderPolynomialFit(-1.06797724e10,
-2.06872020e10,
8.24893246e11,
-2.39505843e10,
-2.44522017e10,
5.38030101e10,
0.0,
0.0,
0.0,
0.0)
meltFitSteel = NinthOrderPolynomialFit(7.40464217e10,
2.49802214e11,
1.00445029e12,
-1.36451475e11,
7.72897829e9,
5.06390305e10,
0.0,
0.0,
0.0,
0.0)
strengthModelSteel = SteinbergGuinanStrengthCGS3d(eosSteel,
7.700000e11, # G0
2.2600e-12, # A
4.5500e-04, # B
3.4000e9, # Y0
2.5e10, # Ymax
1.0e-3, # Yp
43.0000, # beta
0.0, # gamma0
0.35, # nhard
coldFitSteel,
meltFitSteel)
#-------------------------------------------------------------------------------
# Lexan material properties.
# Note: for lack of strength information about this material, I'm substituting
# in the strength parameters for lucite here. :)
#-------------------------------------------------------------------------------
eosLexan = GruneisenEquationOfStateCGS3d(rho0Lexan, # reference density
etaMinLexan, # etamin
etaMaxLexan, # etamax
0.1933e6, # C0
3.49, # S1
-8.2, # S2
9.6, # S3
0.61, # gamma0
0.0, # b
28423.0) # atomic weight
coldFitLexan = NinthOrderPolynomialFit(-5.19191852e9,
-4.41500192e9,
2.84720528e10,
2.14093899e10,
-4.46412259e9,
1.24495222e9,
0.0,
0.0,
0.0,
0.0)
meltFitLexan = NinthOrderPolynomialFit(5.24383771e8,
1.49188457e9,
2.85704428e10,
2.13783662e10,
-4.45135120e9,
1.24138074e9,
0.0,
0.0,
0.0,
0.0)
strengthLexan = SteinbergGuinanStrengthCGS3d(eosLexan,
2.320000e10, # G0
0.0, # A
0.0, # B
4.2000e9, # Y0
1.0e12, # Ymax
1.0e-3, # Yp
0.0, # beta
0.0, # gamma0
0.0, # nhard
coldFitLexan,
meltFitLexan)
#-------------------------------------------------------------------------------
# Copper material properties.
#-------------------------------------------------------------------------------
eosCu = GruneisenEquationOfStateCGS3d(rho0Cu, # reference density
etaMinCu, # etamin
etaMaxCu, # etamax
0.394e6, # C0
1.489, # S1
0.0, # S2
0.0, # S3
2.02, # gamma0
0.47, # b
63.57) # atomic weight
coldFitCu = NinthOrderPolynomialFit(-1.05111874e10,
-2.13429672e10,
6.92768584e11,
-2.45626513e10,
-2.48677403e10,
4.35373677e10,
0.0,
0.0,
0.0,
0.0)
meltFitCu = NinthOrderPolynomialFit(5.22055639e10,
1.90143176e11,
8.51351901e11,
-1.12049022e11,
-6.11436674e9,
4.36007831e10,
0.0,
0.0,
0.0,
0.0)
strengthModelCu = SteinbergGuinanStrengthCGS3d(eosCu,
4.770000e11, # G0
2.8300e-12, # A
3.7700e-04, # B
1.2000e9, # Y0
6.4000e9, # Ymax
1.0e-3, # Yp
36.0000, # beta
0.0, # gamma0
0.45, # nhard
coldFitCu,
meltFitCu)
#-------------------------------------------------------------------------------
# Foam material properties. (Polystyrene CH)
#-------------------------------------------------------------------------------
eosFoam = GruneisenEquationOfStateCGS3d(rho0Foam, # reference density
etaMinFoam, # etamin
etaMaxFoam, # etamax
0.189e6, # C0
2.965, # S1
-4.069, # S2
2.328, # S3
0.67, # gamma0
0.0, # b
6.982) # atomic weight
#-------------------------------------------------------------------------------
# Air material properties.
#-------------------------------------------------------------------------------
gammaAir = 1.4
molecularWeightAir = 30.0
eosAir = GammaLawGasCGS3d(gammaAir, molecularWeightAir)
#-------------------------------------------------------------------------------
# Create our interpolation kernels -- one for normal hydro interactions, and
# one for use with the artificial viscosity
#-------------------------------------------------------------------------------
WT = TableKernel3d(BSplineKernel3d(), 1000)
WTPi = TableKernel3d(BSplineKernel3d(), 1000)
output("WT")
output("WTPi")
kernelExtent = WT.kernelExtent()
#-------------------------------------------------------------------------------
# Create the NodeLists.
#-------------------------------------------------------------------------------
nodesSteel = SolidNodeListConstructor("Stainless steel", eosSteel, strengthModelSteel, WT, WTPi)
nodesPlug = SolidNodeListConstructor("Lexan plug", eosLexan, strengthLexan, WT, WTPi)
nodesProj = SolidNodeListConstructor("Lexan projectile", eosLexan, strengthLexan, WT, WTPi)
nodesSteelAnvil = SolidNodeListConstructor("Anvil (Steel)", eosSteel, strengthModelSteel, WT, WTPi)
nodesFoamAnvil = SolidNodeListConstructor("Anvil (Foam)", eosFoam, strengthLexan, WT, WTPi)
nodesCuAnvil = SolidNodeListConstructor("Anvil (Copper)", eosCu, strengthModelCu, WT, WTPi)
nodeSet = [nodesSteel,
nodesPlug,
nodesProj,
nodesSteelAnvil,
nodesFoamAnvil,
nodesCuAnvil]
for n, rho0, etaMin, etaMax in zip(nodeSet,
[rho0Steel, rho0Lexan, rho0Lexan, rho0Steel, rho0Lexan, rho0Cu],
[etaMinSteel, etaMinLexan, etaMinLexan, etaMinSteel, etaMinLexan, etaMinCu],
[etaMaxSteel, etaMaxLexan, etaMaxLexan, etaMaxSteel, etaMaxLexan, etaMaxCu]):
n.nodesPerSmoothingScale = nPerh
n.epsilonTensile = epsilonTensile
n.nTensile = nTensile
n.hmin = hmin
n.hmax = hmax
n.hminratio = hminratio
n.XSPH = XSPH
n.rhoMin = etaMin*rho0
n.rhoMax = etaMax*rho0
output("n.name()")
output(" n.nodesPerSmoothingScale")
output(" n.epsilonTensile")
output(" n.nTensile")
output(" n.hmin")
output(" n.hmax")
output(" n.hminratio")
output(" n.XSPH")
output(" n.rhoMin")
output(" n.rhoMax")
del n
#-------------------------------------------------------------------------------
# Construct the neighbor objects and associate them with the node lists.
#-------------------------------------------------------------------------------
cache = []
for n in nodeSet:
neighbor = TreeNeighbor3d(n,
kernelExtent = kernelExtent)
n.registerNeighbor(neighbor)
cache.append(neighbor)
del n
#-------------------------------------------------------------------------------
# Set node properties (positions, velocites, etc.)
#-------------------------------------------------------------------------------
if restoreCycle is None:
print "Generating node distribution."
from GenerateNodeDistribution2d import *
from CompositeNodeDistribution import *
from ParMETISDistributeNodes import distributeNodes3d
generatorTube = GenerateNodeDistributionRZ(nltube,
nrtube,
rho0Steel,
"lattice",
xmin = xminTube,
xmax = xmaxTube,
nNodePerh = nPerh,
SPH = not isinstance(nodesSteel, AsphSolidNodeList3d))
generatorPlug = GenerateNodeDistributionRZ(nlplug,
nrplug,
rho0Lexan,
"lattice",
xmin = xminPlug,
xmax = xmaxPlug,
nNodePerh = nPerh,
SPH = not isinstance(nodesPlug, AsphSolidNodeList3d))
generatorProj = GenerateNodeDistributionRZ(nlproj,
nrproj,
rho0Lexan,
"lattice",
xmin = xminProj,
xmax = xmaxProj,
nNodePerh = nPerh,
SPH = not isinstance(nodesProj, AsphSolidNodeList3d))
generatorSteelAnvil1 = GenerateNodeDistributionRZ(nlSteelAnvil,
nrSteelAnvil,
rho0Steel,
"lattice",
xmin = xminSteelAnvil,
xmax = xmaxSteelAnvil,
nNodePerh = nPerh,
SPH = not isinstance(nodesSteelAnvil, AsphSolidNodeList3d))
generatorSteelAnvil2 = GenerateNodeDistributionRZ(nlSteelAnvilCap,
nrSteelAnvilCap,
rho0Steel,
"lattice",
xmin = xminSteelAnvilCap,
xmax = xmaxSteelAnvilCap,
nNodePerh = nPerh,
SPH = not isinstance(nodesSteelAnvil, AsphSolidNodeList3d))
generatorSteelAnvil = CompositeNodeDistribution(generatorSteelAnvil1, generatorSteelAnvil2)
generatorFoamAnvil = GenerateNodeDistributionRZ(nlFoamAnvil,
nrFoamAnvil,
rho0Foam,
"lattice",
xmin = xminFoamAnvil,
xmax = xmaxFoamAnvil,
nNodePerh = nPerh,
SPH = not isinstance(nodesFoamAnvil, AsphSolidNodeList3d))
generatorCuAnvil = GenerateNodeDistributionRZ(nlCuAnvil,
nrCuAnvil,
rho0Cu,
"lattice",
xmin = xminCuAnvil,
xmax = xmaxCuAnvil,
nNodePerh = nPerh,
SPH = not isinstance(nodesCuAnvil, AsphSolidNodeList3d))
print "Starting node distribution..."
distributeNodes3d((nodesSteel, generatorTube),
(nodesPlug, generatorPlug),
(nodesProj, generatorProj),
(nodesSteelAnvil, generatorSteelAnvil),
(nodesFoamAnvil, generatorFoamAnvil),
(nodesCuAnvil, generatorCuAnvil))
nGlobalNodes = 0
for n in nodeSet:
print "Generator info for %s" % n.name()
output(" mpi.allreduce(n.numInternalNodes, mpi.MIN)")
output(" mpi.allreduce(n.numInternalNodes, mpi.MAX)")
output(" mpi.allreduce(n.numInternalNodes, mpi.SUM)")
nGlobalNodes += mpi.allreduce(n.numInternalNodes, mpi.SUM)
del n
print "Total number of (internal) nodes in simulation: ", nGlobalNodes
# Bevel the inner opening surface of the target tube.
numNodesBeveled = bevelTubeEntrance(nodesSteel,
2,
tubeOpeningAngle,
rtubeInner,
tubeThickness,
xBevelBegin)
print "Beveled %i nodes in the tube opening." % mpi.allreduce(numNodesBeveled,
mpi.SUM)
# Adjust the diameter of the projectile inward a bit, so it will slide
# into the tube properly.
drProj = 0.75*nPerh*rproj/nrproj
projMultiplier = (rproj - drProj)/rproj
for i in xrange(nodesProj.numInternalNodes):
nodesProj.positions()[i].y *= projMultiplier
# Adjust the plug to match.
for i in xrange(nodesPlug.numInternalNodes):
nodesPlug.positions()[i].y *= projMultiplier
# Iterate over the NodeLists and set some initial conditions.
for n, rho0 in [(nodesSteel, rho0Steel),
(nodesPlug, rho0Lexan),
(nodesProj, rho0Lexan),
(nodesSteelAnvil, rho0Steel),
(nodesFoamAnvil, rho0Foam),
(nodesCuAnvil, rho0Cu)]:
# Set node specific thermal energies
u0 = n.equationOfState().specificThermalEnergy(rho0, 300.0)
n.specificThermalEnergy(ScalarField3d("tmp", n, u0))
print "Initial pressure for %s: %g" % (n.name(),
n.equationOfState().pressure(rho0, u0))
del n
# Set the projectile velocities.
nodesProj.velocity(VectorField3d("tmp", nodesProj, v0proj))
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node lists.
#-------------------------------------------------------------------------------
db = DataBase3d()
for n in nodeSet:
db.appendNodeList(n)
del n
output("db")
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq)
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
q.epsilon2 = epsilon2
output("q")
output("q.Cl")
output("q.Cq")
output("q.limiter")
output("q.epsilon2")
output("q.balsaraShearCorrection")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
hydro = Hydro3d(WT, WTPi, q, compatibleEnergyEvolution)
hydro.cfl = cfl
hydro.useVelocityMagnitudeForDt = True
hydro.HEvolution = HEvolution
hydro.sumForMassDensity = sumForMassDensity
hydro.HsmoothMin = hmin
hydro.HsmoothMax = hmax
output("hydro")
output("hydro.cfl")
output("hydro.useVelocityMagnitudeForDt")
output("hydro.HEvolution")
output("hydro.sumForMassDensity")
output("hydro.HsmoothMin")
output("hydro.HsmoothMax")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.valid()")
#-------------------------------------------------------------------------------
# Construct a strength physics object.
#-------------------------------------------------------------------------------
strength = Strength3d()
output("strength")
#-------------------------------------------------------------------------------
# Construct a damage model.
#-------------------------------------------------------------------------------
nfull = max(1, (2.0*pi*(rtubeInner + 0.5*tubeThickness)/(tubeThickness/nrtube) *
mpi.allreduce(nodesSteel.numInternalNodes, mpi.SUM)))
nflaws = int(nfull*log(nfull))
print "Computing equivalent 3-D number of nodes in tube: %i" % nfull
print "Resulting in effective total number of flaws in volume: %i" % nflaws
damageModel = DamageModelConstructor(nodesSteel,
kWeibullSteel,
mWeibullSteel,
volumeSteel,
WT,
randomSeedSteel,
strainType,
0.4,
1,
nflaws)
output("damageModel")
#-------------------------------------------------------------------------------
# Construct a predictor corrector integrator.
#-------------------------------------------------------------------------------
integrator = PredictorCorrectorIntegrator3d(db)
integrator.appendPhysicsPackage(hydro)
integrator.appendPhysicsPackage(strength)
integrator.appendPhysicsPackage(damageModel)
integrator.lastDt = dt
if dtMin:
integrator.dtMin = dtMin
if dtMax:
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.havePhysicsPackage(strength)")
output("integrator.havePhysicsPackage(damageModel)")
output("integrator.valid()")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
output("integrator.dtGrowth")
#-------------------------------------------------------------------------------
# Construct boundary conditions, and add them to our physics packages.
#-------------------------------------------------------------------------------
xbcPlane = Plane3d(Vector3d(0.0, 0.0, 0.0), Vector3d(1.0, 0.0, 0.0))
xbc = ReflectingBoundary3d(xbcPlane)
rzbc = CylindricalBoundary(db)
for package in integrator.physicsPackages():
package.appendBoundary(rzbc)
package.appendBoundary(xbc)
#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
statsStep = statsStep,
restartStep = restartStep,
redistributeStep = redistributeStep,
restartBaseName = restartBaseName,
initializeMassDensity = False)
output("control")
#-------------------------------------------------------------------------------
# Select the nodes for the VISAR sampling.
#-------------------------------------------------------------------------------
class SelectVISARNodes:
def __init__(self, x0, dx, dy, label):
self.x0 = x0
self.dx = dx
self.dy = dy
self.label = label
return
def __call__(self, nodes):
r = nodes.positions()
potentials = [i for i in xrange(nodes.numInternalNodes)
if abs(r[i].x - self.x0) < self.dx]
ymax = mpi.allreduce(max([r[i].y for i in potentials] + [-1e50]), mpi.MAX)
result = [i for i in potentials if r[i].y > ymax - self.dy]
print "Selected %i %s velocimetry test points." % (mpi.allreduce(len(result), mpi.SUM),
self.label)
return result
#-------------------------------------------------------------------------------
# Sampling function to measure the average velocity at the VISAR probe sites.
#-------------------------------------------------------------------------------
def averageCylindricalRadialVelocity(nodes, indices):
m = nodes.mass()
v = nodes.velocity()
massSum = 1e-30
result = 0.0
    for i in indices:
assert i >= 0 and i < nodes.numInternalNodes
massSum += m[i]
result += m[i] * v[i].y
globalMassSum = mpi.allreduce(massSum, mpi.SUM)
globalResult = mpi.allreduce(result, mpi.SUM)
assert globalMassSum > 0.0
globalResult /= globalMassSum
return globalResult
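# (averageCylindricalRadialVelocity returns the mass-weighted mean of v_y over
#  the selected nodes, reduced across all MPI ranks: vbar = sum(m_i*v_i)/sum(m_i).)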
#-------------------------------------------------------------------------------
# Build the history objects to simulate the VISAR velocity probes.
#-------------------------------------------------------------------------------
nodesA = SelectVISARNodes(xVISARa, dxVISAR, dyVISAR, "A")
nodesB = SelectVISARNodes(xVISARb, dxVISAR, dyVISAR, "B")
nodesC = SelectVISARNodes(xVISARc, dxVISAR, dyVISAR, "C")
VISARa = NodeHistory(nodesSteel, nodesA, averageCylindricalRadialVelocity,
dataDir + "/VISAR-a")
VISARb = NodeHistory(nodesSteel, nodesB, averageCylindricalRadialVelocity,
dataDir + "/VISAR-b")
VISARc = NodeHistory(nodesSteel, nodesC, averageCylindricalRadialVelocity,
dataDir + "/VISAR-c")
VISARa.nodeFlags.setName("VISAR a points")
VISARb.nodeFlags.setName("VISAR b points")
VISARc.nodeFlags.setName("VISAR c points")
control.appendPeriodicWork(VISARa.sample, VISARsampleFrequency)
control.appendPeriodicWork(VISARb.sample, VISARsampleFrequency)
control.appendPeriodicWork(VISARc.sample, VISARsampleFrequency)
#-------------------------------------------------------------------------------
# Drop visualization files.
#-------------------------------------------------------------------------------
def viz(fields = [],
filename = "ExpandingTube-rz"):
damage = damageModel.damage()
dtrace = ScalarField3d("damage magnitude", nodesSteel)
dmin = ScalarField3d("damage min", nodesSteel)
dmax = ScalarField3d("damage max", nodesSteel)
strain = damageModel.strain()
svol = ScalarField3d("strain vol", nodesSteel)
smin = ScalarField3d("strain min", nodesSteel)
smax = ScalarField3d("strain max", nodesSteel)
for i in xrange(nodesSteel.numInternalNodes):
dtrace[i] = damage[i].Trace()
dev = damage[i].eigenValues()
dmin[i] = dev.minElement()
dmax[i] = dev.maxElement()
svol[i] = strain[i].Trace()
sev = strain[i].eigenValues()
smin[i] = sev.minElement()
smax[i] = sev.maxElement()
dumpPhysicsState(integrator,
filename,
visitDir,
fields = [damageModel.sumActivationEnergiesPerNode(),
damageModel.numFlawsPerNode(),
VISARa.nodeFlags,
VISARb.nodeFlags,
VISARc.nodeFlags,
dtrace,
dmin,
dmax,
svol,
smin,
smax] + fields,
)
#-------------------------------------------------------------------------------
# Smooth the initial conditions/restore state.
#-------------------------------------------------------------------------------
if restoreCycle is not None:
control.loadRestartFile(restoreCycle)
control.setRestartBaseName(restartBaseName)
control.setFrequency(control.updateDomainDistribution, redistributeStep)
VISARa.flushHistory()
VISARb.flushHistory()
VISARc.flushHistory()
else:
control.iterateIdealH()
control.smoothState(smoothIters)
control.dropRestartFile()
viz()
#-------------------------------------------------------------------------------
# Advance to the end time.
#-------------------------------------------------------------------------------
while control.time() < goalTime:
nextGoalTime = min(control.time() + dtSample, goalTime)
control.advance(nextGoalTime, maxSteps)
control.dropRestartFile()
viz()
```
#### File: functional/Gravity/spherical-3d-SPH.py
```python
from math import *
from Spheral import *
from SpheralTestUtilities import *
from SpheralGnuPlotUtilities import *
from SpheralVisitDump import dumpPhysicsState
import mpi
from GenerateNodeDistribution3d import *
from CubicNodeGenerator import GenerateCubicNodeDistribution
title("3-D spherical gravity test")
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(NodeListConstructor = SphNodeList3d,
seed = "lattice",
nx = 10,
ny = 10,
nz = 10,
rho1 = 1.0,
eps1 = 0.0,
vr1 = -1.0,
nPerh = 1.25,
rmin = 0.0,
rmax = 1.0,
gamma = 5.0/3.0,
mu = 1.0,
#Qconstructor = MonaghanGingoldViscosity3d,
Qconstructor = TensorMonaghanGingoldViscosity3d,
Cl = 1.0,
Cq = 0.75,
Qlimiter = True,
balsaraCorrection = False,
epsilon2 = 1e-2,
negligibleSoundSpeed = 1e-5,
csMultiplier = 1e-4,
hmin = 1e-5,
hmax = 1.0,
hminratio = 0.05,
HsmoothFraction = 0.0,
cfl = 0.5,
XSPH = True,
epsilonTensile = 0.0,
nTensile = 8,
HEvolution = Hydro3d.HEvolutionType.IdealH,
limitIdealH = False,
goalTime = 0.6,
dt = 0.0001,
dtMin = 1.0e-5,
dtMax = None,
dtGrowth = 2.0,
dtSample = 0.1,
maxSteps = None,
statsStep = 10,
smoothIters = 0,
sumForMassDensity = Hydro3d.MassDensityType.RigorousSumDensity,
restoreCycle = None,
L1v0 = 0.0889732,
L1rho0 = 5.51975,
L1eps0 = 0.04701,
L1P0 = 1.66301,
L1A0 = 0.00344783,
graphics = False,
)
#-------------------------------------------------------------------------------
# If we're using the cubic node generator, then scale things so we get a
# constant work per domain, and run to the same self-similar shock fraction
# of the node distribution.
#-------------------------------------------------------------------------------
if seed == "cubic":
nxdomains = int(mpi.procs**(1.0/3.0) + 0.1)
assert nxdomains**3 == mpi.procs
nx *= nxdomains
ny *= nxdomains
nz *= nxdomains
print nxdomains, nx, ny, nz
#-------------------------------------------------------------------------------
# A few derived variables.
#-------------------------------------------------------------------------------
xmin = (0.0, 0.0, 0.0)
xmax = (1.0, 1.0, 1.0)
dataDir = "spherical-3d-SPH-%ix%ix%i" % (nx, ny, nz)
visitDir = dataDir + "/visit"
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys
if mpi.rank == 0:
if not os.path.exists(visitDir):
os.makedirs(visitDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS3d(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel3d(BSplineKernel3d(), 1000)
WTPi = TableKernel3d(BSplineKernel3d(), 1000)
output("WT")
output("WTPi")
kernelExtent = WT.kernelExtent()
#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
nodes = NodeListConstructor("nodes", eos, WT, WTPi)
output("nodes")
nodes.HsmoothFraction = HsmoothFraction
nodes.XSPH = XSPH
nodes.nodesPerSmoothingScale = nPerh
nodes.epsilonTensile = epsilonTensile
nodes.nTensile = nTensile
nodes.hmin = hmin
nodes.hmax = hmax
nodes.hminratio = hminratio
output("nodes.HsmoothFraction")
output("nodes.nodesPerSmoothingScale")
output("nodes.epsilonTensile")
output("nodes.nTensile")
output("nodes.XSPH")
output("nodes.hmin")
output("nodes.hmax")
output("nodes.hminratio")
#-------------------------------------------------------------------------------
# Construct the neighbor object.
#-------------------------------------------------------------------------------
neighbor1 = TreeNeighbor3d(nodes,
kernelExtent = kernelExtent)
nodes.registerNeighbor(neighbor1)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
if restoreCycle is None:
if seed == "cubic":
from DistributeNodes import nullDistributeNodes3d
generator = GenerateCubicNodeDistribution(nx, ny, nz, rho1,
xmin = xmin,
xmax = xmax,
nNodePerh = nPerh,
SPH = (NodeListConstructor == SphNodeList3d))
nullDistributeNodes3d((nodes, generator))
else:
from ParMETISDistributeNodes import distributeNodes3d
generator = GenerateNodeDistribution3d(nx, ny, nz, rho1, seed,
xmin = xmin,
xmax = xmax,
rmin = rmin,
rmax = rmax,
nNodePerh = nPerh,
SPH = (NodeListConstructor == SphNodeList3d))
distributeNodes3d((nodes, generator))
output("mpi.reduce(nodes.numInternalNodes, mpi.MIN)")
output("mpi.reduce(nodes.numInternalNodes, mpi.MAX)")
output("mpi.reduce(nodes.numInternalNodes, mpi.SUM)")
# Set node specific thermal energies
nodes.specificThermalEnergy(ScalarField3d("tmp", nodes, eps1))
# Set node velocities
for nodeID in xrange(nodes.numNodes):
nodes.velocity()[nodeID] = nodes.positions()[nodeID].unitVector()*vr1
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase3d()
output("db")
output("db.appendNodeList(nodes)")
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct the artificial viscosities for the problem.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq)
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
q.epsilon2 = epsilon2
q.negligibleSoundSpeed = negligibleSoundSpeed
q.csMultiplier = csMultiplier
output("q")
output("q.Cl")
output("q.Cq")
output("q.limiter")
output("q.epsilon2")
output("q.negligibleSoundSpeed")
output("q.csMultiplier")
output("q.balsaraShearCorrection")
##-------------------------------------------------------------------------------
## Construct the hydro physics object.
##-------------------------------------------------------------------------------
#hydro = Hydro3d(WT, WTPi, q)
#hydro.cfl = cfl
#hydro.HEvolution = HEvolution
#hydro.sumForMassDensity = sumForMassDensity
#hydro.HsmoothMin = hmin
#hydro.HsmoothMax = hmax
#output("hydro")
#output("hydro.cfl")
#output("hydro.HEvolution")
#output("hydro.sumForMassDensity")
#output("hydro.HsmoothMin")
#output("hydro.HsmoothMax")
#output("hydro.kernel()")
#output("hydro.PiKernel()")
#output("hydro.valid()")
#-------------------------------------------------------------------------------
# Construct the gravity physics object.
#-------------------------------------------------------------------------------
G = 1.0
gravity = SPHGravity3d(WT, G, 2.0)
#-------------------------------------------------------------------------------
# Create boundary conditions.
#-------------------------------------------------------------------------------
#xPlane0 = Plane3d(Vector3d(0.0, 0.0, 0.0), Vector3d(1.0, 0.0, 0.0))
#yPlane0 = Plane3d(Vector3d(0.0, 0.0, 0.0), Vector3d(0.0, 1.0, 0.0))
#zPlane0 = Plane3d(Vector3d(0.0, 0.0, 0.0), Vector3d(0.0, 0.0, 1.0))
#xbc0 = ReflectingBoundary3d(xPlane0)
#ybc0 = ReflectingBoundary3d(yPlane0)
#zbc0 = ReflectingBoundary3d(zPlane0)
#gravity.appendBoundary(xbc0)
#gravity.appendBoundary(ybc0)
#gravity.appendBoundary(zbc0)
#output("gravity.haveBoundary(xbc0)")
#output("gravity.haveBoundary(ybc0)")
#output("gravity.haveBoundary(zbc0)")
#-------------------------------------------------------------------------------
# Construct a time integrator.
#-------------------------------------------------------------------------------
integrator = SynchronousRK2Integrator3d(db)
integrator.appendPhysicsPackage(gravity)
integrator.lastDt = dt
if dtMin:
integrator.dtMin = dtMin
if dtMax:
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
output("integrator")
output("integrator.havePhysicsPackage(gravity)")
output("integrator.valid()")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
output("integrator.dtGrowth")
#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
statsStep = statsStep)
output("control")
#-------------------------------------------------------------------------------
# Advance one step.
#-------------------------------------------------------------------------------
nextGoalTime = min(control.time() + dtSample, goalTime)
control.step()
D = gravity.matrix()
Dij = D.entries()
Ds = D.structure
Dsij = Ds.entries()
psi = gravity.potential
stop   # presumably an intentional halt: 'stop' is undefined, so this raises NameError and ends the script here
#-------------------------------------------------------------------------------
# Plot the results.
#-------------------------------------------------------------------------------
import NohAnalyticSolution
answer = NohAnalyticSolution.NohSolution(3, h0 = nPerh*rmax/nx)
if graphics:
from SpheralGnuPlotUtilities import *
# Plot the final state.
rhoPlot, vrPlot, epsPlot, PPlot, HPlot = plotRadialState(db)
# Overplot the analytic solution.
plotAnswer(answer, control.time(),
rhoPlot = rhoPlot,
velPlot = vrPlot,
epsPlot = epsPlot,
PPlot = PPlot)
#-------------------------------------------------------------------------------
# Measure the difference between the simulation and analytic answer.
#-------------------------------------------------------------------------------
# Figure out which of the node we want to measure the error on.
rmin = 0.05
rmax = 0.35
rall = [x.magnitude() for x in nodes.positions().internalValues()]
imask = [i for i in xrange(nodes.numInternalNodes)
if (rall[i] > rmin and rall[i] < rmax)]
Nlocal = len(imask)
Nglobal = mpi.allreduce(Nlocal, mpi.SUM)
# Find the local profiles.
r = nodes.positions().internalValues()
vel = nodes.velocity().internalValues()
rho = nodes.massDensity().internalValues()
eps = nodes.specificThermalEnergy().internalValues()
P = nodes.pressure().internalValues()
xprof = [rall[i] for i in imask]
vprof = [vel[i].dot(r[i].unitVector()) for i in imask]
rhoprof = [rho[i] for i in imask]
epsprof = [eps[i] for i in imask]
Pprof = [P[i] for i in imask]
Aprof = [Pi/rhoi**gamma for (Pi, rhoi) in zip(Pprof, rhoprof)]
# Compute the analytic answer on the positions of the nodes.
xans, vans, epsans, rhoans, Pans, hans = answer.solution(control.time(), xprof)
Aans = [Pi/rhoi**gamma for (Pi, rhoi) in zip(Pans, rhoans)]
# Compute the L1 error norms.
def computeL1(q, q0):
if Nlocal > 0:
import Pnorm
error = [qi - q0i for (qi, q0i) in zip(q, q0)]
Pn = Pnorm.Pnorm(error, xprof)
L1local = Pn.pnormAverage(1, rmin = 0.05, rmax = 0.35)
else:
L1local = 0.0
return mpi.allreduce(L1local*Nlocal, mpi.SUM)/Nglobal
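# (computeL1 weights each rank's local L1 average by its node count before the
#  global reduce, so ranks with no nodes in [rmin, rmax] contribute nothing.)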
L1v = computeL1(vprof, vans)
L1rho = computeL1(rhoprof, rhoans)
L1eps = computeL1(epsprof, epsans)
L1P = computeL1(Pprof, Pans)
L1A = computeL1(Aprof, Aans)
# Now we can compare the errors to our expectations.
for (name, L1, L10) in [("Velocity ", L1v, L1v0),
("Mass Density", L1rho, L1rho0),
("Thermal E ", L1eps, L1eps0),
("Pressure ", L1P, L1P0),
("Entropy ", L1A, L1A0)]:
L1exp = L10 * (nx/25.0)**(-0.8)
print "\t%s L1 = %g < %g" % (name, L1, L1exp)
if L1 > L1exp:
raise "L1 error estimate for %s outside expected bounds: %g != %g" % (name,
L1,
L1exp)
```
#### File: Hydro/AcousticWave/CSPH_mod_package.py
```python
from Spheral1d import *
class CRKSPH_mod_package(Physics):
def __init__(self):
Physics.__init__(self)
return
def evaluateDerivatives(self, t, dt, db, state, derivs):
return
def dt(self, db, state, derivs, t):
return pair_double_string(1e100, "No vote")
def registerState(self, dt, state):
return
def registerDerivatives(self, db, derivs):
return
def label(self):
return "CRKSPH_mod_package"
def initialize(self, t, dt, db, state, derivs):
# Grab the CRKSPH arrays.
A0_fl = state.scalarFields(HydroFieldNames.A0_CRKSPH)
A_fl = state.scalarFields(HydroFieldNames.A_CRKSPH)
B_fl = state.vectorFields(HydroFieldNames.B_CRKSPH)
A0 = A0_fl[0]
A = A_fl[0]
B = B_fl[0]
print "A", A.internalValues()
return
```
#### File: Hydro/BlobTest/CloudMassFraction.py
```python
from NodeHistory import NodeHistory
import Spheral
import mpi
class CloudMassFraction(NodeHistory):
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self,
r0, # initial blob radius
rhoThreshold, # density cutoff
epsThreshold, # specific thermal energy cutoff
nodes, # blob NodeList
filename): # file to write the results to
self.r0 = r0
self.rho0 = rhoThreshold
self.eps0 = epsThreshold
NodeHistory.__init__(self,
nodes,
[],
self.measureCloudFraction,
filename,
labels = ("mfrac","mass","volume"))
# Check our dimensionality
if isinstance(nodes, Spheral.NodeList2d):
self.ndim = 2
elif isinstance(nodes, Spheral.NodeList3d):
self.ndim = 3
else:
raise RuntimeError, "What the heck is %s?" % nodes
# Find the starting mass of the cloud.
self.M0 = nodes.mass().sumElements()
return
#---------------------------------------------------------------------------
# Do our measurements.
#---------------------------------------------------------------------------
def measureCloudFraction(self, nodes, indices):
mass = nodes.mass()
rho = nodes.massDensity()
eps = nodes.specificThermalEnergy()
msum, volsum = 0.0, 0.0
for i in xrange(nodes.numInternalNodes):
if rho[i] > self.rho0 and eps[i] < self.eps0:
msum += mass[i]
volsum += mass[i]/rho[i]
msum = mpi.allreduce(msum, mpi.SUM)
volsum = mpi.allreduce(volsum, mpi.SUM)
return msum/self.M0, msum, volsum
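    # (The returned tuple matches the labels given to NodeHistory above:
    #  mass fraction relative to M0, total cloud mass, and cloud volume.)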
```
#### File: ICF/KidderIsentropicImplosion/KidderIsentropicCapsuleAnalyticSolution.py
```python
from math import *
from SpheralTestUtilities import sgn
from numericalIntegration import trapezoidalIntegration
class KidderIsentropicCapsuleAnalyticSolution:
#---------------------------------------------------------------------------
# Constructor.
#---------------------------------------------------------------------------
def __init__(self,
nu, # Dimension (1, 2, or 3).
r0, # Initial inner boundary radius
r1, # Initial outer boundary radius
P0, # Initial inner boundary pressure
P1, # Initial outer boundary pressure
rho1, # Initial outer boundary density
):
# Pre-conditions.
assert nu in (1, 2, 3)
assert r0 < r1
assert rho1 > 0.0
# Derive a bunch of useful parameters.
self.nu = nu
self.r0 = r0
self.r1 = r1
self.P0 = P0
self.P1 = P1
self.rho1 = rho1
self.gamma = 1.0 + 2.0/nu
self.gamma1 = self.gamma - 1.0
self.S = P1/(rho1**self.gamma)
self.rho0 = rho1*(P0/P1)**(1.0/self.gamma)
self.tau = sqrt(0.5*self.gamma1 * (r1*r1 - r0*r0)/(self.S*self.gamma*(self.rho1**self.gamma1 - self.rho0**self.gamma1)))
assert self.tau > 0.0
# Time 0 energy constants.
self.K0 = trapezoidalIntegration(self._K0profile, r0, r1, 2001)
self.T0 = trapezoidalIntegration(self._T0profile, r0, r1, 2001)
return
#---------------------------------------------------------------------------
# tfrac : The dimensionless time.
#---------------------------------------------------------------------------
def tfrac(self, t):
t1 = t/self.tau
assert t1 >= 0.0 and t1 <= 1.0
return t1
#---------------------------------------------------------------------------
# hfrac : the dimensionless radius scaling.
#---------------------------------------------------------------------------
def hfrac(self, t):
return sqrt(max(0.0, 1.0 - self.tfrac(t)**2))
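    # (hfrac is the homogeneous scale factor h(t) = sqrt(1 - (t/tau)**2):
    #  all radii shrink as r(t) = r(0)*h(t), reaching full collapse at t = tau.)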
#---------------------------------------------------------------------------
# hfracDot : the time derivative of the dimensionless radius scaling.
#---------------------------------------------------------------------------
def hfracDot(self, t):
return -t/(self.tau*self.tau * self.hfrac(t))
#---------------------------------------------------------------------------
# Inner and outer radii as a function of time.
#---------------------------------------------------------------------------
def rInner(self, t):
return self.r0*self.hfrac(t)
def rOuter(self, t):
return self.r1*self.hfrac(t)
#---------------------------------------------------------------------------
# Inner and outer pressures as a function of time.
#---------------------------------------------------------------------------
def Pinner(self, t):
return self.P0/self.hfrac(t)**(2.0*self.gamma/self.gamma1)
def Pouter(self, t):
return self.P1/self.hfrac(t)**(2.0*self.gamma/self.gamma1)
#---------------------------------------------------------------------------
# Inner and outer velocities as a function of time.
#---------------------------------------------------------------------------
def vrInner(self, t):
return -self.r0 * t/(self.tau*self.tau * self.hfrac(t))
def vrOuter(self, t):
return -self.r1 * t/(self.tau*self.tau * self.hfrac(t))
#---------------------------------------------------------------------------
# The initial radius for a given radius and time.
#---------------------------------------------------------------------------
def initialRadius(self, t, r):
ht = self.hfrac(t)
assert ht > 0.0
return r/ht
#---------------------------------------------------------------------------
# The allowed radial bounds as a function of time.
#---------------------------------------------------------------------------
def rRange(self, t):
hi = self.hfrac(t)
return self.r0*hi, self.r1*hi
#---------------------------------------------------------------------------
# Check that the given r at time t is in the allowed bounds of the shell.
#---------------------------------------------------------------------------
def rInBounds(self, t, r):
rmin, rmax = self.rRange(t)
return r/rmin >= 0.99999 and r/rmax <= 1.00001
#---------------------------------------------------------------------------
# Return r in the expected boundaries.
#---------------------------------------------------------------------------
def boundedR(self, t, r):
rmin, rmax = self.rRange(t)
if r < rmin:
return rmin
elif r > rmax:
return rmax
else:
return r
#---------------------------------------------------------------------------
# The initial density and pressure.
# Note you must pass in the *initial* radius for the point you want!
#---------------------------------------------------------------------------
def rhoInitial(self, ri):
thpt = 1.0/(self.r1**2 - self.r0**2)
ack = ((self.r1**2 - ri*ri)*thpt*self.rho0**self.gamma1 +
(ri*ri - self.r0**2)*thpt*self.rho1**self.gamma1)
acksgn = sgn(ack)
return acksgn * abs(ack)**(1.0/self.gamma1)
def Pinitial(self, ri):
return self.S*(self.rhoInitial(ri))**self.gamma
#---------------------------------------------------------------------------
# The density, velocity, pressure, and specific thermal energy profiles as
# a function of radius and time.
#---------------------------------------------------------------------------
def rho(self, t, r):
t1 = self.tfrac(t)
hi = self.hfrac(t)
rho0i = self.rhoInitial(r/hi)
return rho0i/hi**(2.0/self.gamma1)
def vr(self, t, r):
hi = self.hfrac(t)
return -r*t/(hi*self.tau)**2
def P(self, t, r):
hi = self.hfrac(t)
P0i = self.Pinitial(r/hi)
return P0i/hi**(2.0*self.gamma/self.gamma1)
def eps(self, t, r):
return self.P(t, r)/(self.gamma1*self.rho(t, r))
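    # (eps follows from the ideal-gas relation P = (gamma - 1)*rho*eps.)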
#---------------------------------------------------------------------------
# The time derivatives of the density, velocity, and pressure.
#---------------------------------------------------------------------------
def rhoDot(self, t, r):
return -2.0*self.hfracDot(t)*self.rho(t, r)/(self.gamma1*self.hfrac(t))
def vrDot(self, t, r):
hi = self.hfrac(t)
return (2.0*r*t*self.hfracDot(t)/(self.tau*self.tau * hi*hi*hi) -
(r + t*self.vr(t, r))/(self.tau*self.tau * hi*hi))
#return 2.0* self.tau**2 *r*t*self.hfracDot(t)/self.hfrac(t) - r - t*self.vr(t, r)
#return -(self.hfracDot(t)*self.vr(t, r) + self.initialRadius(t, r)/(self.tau*self.tau))/self.hfrac(t)
def Pdot(self, t, r):
return -2.0*self.gamma*self.hfracDot(t)*self.P(t, r)/(self.gamma1*self.hfrac(t))
#---------------------------------------------------------------------------
# The radial gradient of the radial velocity.
#---------------------------------------------------------------------------
def DvrDr(self, t, r):
return -t/(self.tau*self.tau*self.hfrac(t)**2)
#---------------------------------------------------------------------------
# The energies in the system as a function of time.
#---------------------------------------------------------------------------
def kineticEnergy(self, t):
return self.K0 * t*t / self.hfrac(t)**((self.gamma + 1.0)/self.gamma1)
def thermalEnergy(self, t):
return self.T0 / self.hfrac(t)**((self.gamma + 1.0)/self.gamma1)
def totalEnergy(self, t):
return (self.K0 * t*t + self.T0)/self.hfrac(t)**((self.gamma + 1.0)/self.gamma1)
#---------------------------------------------------------------------------
# The profiles for the energy computations at time 0.
#---------------------------------------------------------------------------
def _K0profile(self, r):
return 0.5/(self.tau**4)*r*r*self.rhoInitial(r)
def _T0profile(self, r):
return self.Pinitial(r)/self.gamma1
```
#### File: Hydro/KeplerDisk/TwoMatDisk.py
```python
from Spheral2d import *
from SpheralTestUtilities import *
from SpheralGnuPlotUtilities import *
from findLastRestart import *
from math import *
import SpheralPointmeshSiloDump
# Load the mpi module if we're parallel.
import mpi
#mpi, rank, procs = mpi.loadmpi()
from GenerateNodeDistribution2d import *
title("2-D Keplerian disk with arbitrary pressure support.")
# serialDump thing for external viz
class sDump(object):
def __init__(self,nodeSet,directory):
self.nodeSet = nodeSet
self.directory = directory
def __call__(self, cycle, time, dt):
procs = mpi.procs
rank = mpi.rank
serialData = []
i,j = 0,0
for i in xrange(procs):
for nodeL in self.nodeSet:
if rank == i:
for j in xrange(nodeL.numInternalNodes):
serialData.append([nodeL.positions()[j],
3.0/(nodeL.Hfield()[j].Trace()),
nodeL.mass()[j],nodeL.massDensity()[j],
nodeL.specificThermalEnergy()[j]])
serialData = mpi.reduce(serialData,mpi.SUM)
if rank == 0:
f = open(self.directory + "/serialDump" + str(cycle) + ".ascii",'w')
for i in xrange(len(serialData)):
f.write("{0} {1} {2} {3} {4} {5} {6} {7}\n".format(i,serialData[i][0][0],serialData[i][0][1],0.0,serialData[i][1],serialData[i][2],serialData[i][3],serialData[i][4]))
f.close()
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(asph = False,
n = 100,
thetaMin = 0.0,
thetaMax = 2.0*pi,
rmin = 0.0,
rmax = 3.0,
nPerh = 1.51,
# Properties of the central gravitating particle.
G0 = 1.0,
M0 = 1.0,
Rc = 0.5,
R0 = Vector(0.0, 0.0),
# Properties of the gas disk.
rho0 = 1.0,
rd0 = 10.0,
sig = 2.5,
Rcutoff = 0.5,
# Material properties of the gas.
polytropicIndex = 2.0,
mu = 1.0,
SVPH = False,
CRKSPH = False,
ASPH = False,
SPH = True, # This just chooses the H algorithm -- you can use this with CRKSPH for instance.
XSPH = False,
epsilonTensile = 0.0,
nTensile = 8,
# Hydro
Qconstructor = MonaghanGingoldViscosity2d,
#Qconstructor = TensorMonaghanGingoldViscosity2d,
KernelConstructor = NBSplineKernel,
order = 5,
boolReduceViscosity = False,
nh = 5.0,
aMin = 0.1,
aMax = 2.0,
Qhmult = 1.0,
boolCullenViscosity = False,
alphMax = 2.0,
alphMin = 0.02,
betaC = 0.7,
betaD = 0.05,
betaE = 1.0,
fKern = 1.0/3.0,
boolHopkinsCorrection = True,
Cl = 1.0,
Cq = 0.75,
Qlimiter = False,
balsaraCorrection = False,
epsilon2 = 1e-4,
negligibleSoundSpeed = 1e-5,
csMultiplier = 0.1,
correctionOrder = LinearOrder,
hmin = 0.004,
hmax = 0.5,
hminratio = 0.1,
compatibleEnergy = True,
gradhCorrection = False,
HEvolution = IdealH,
sumForMassDensity = RigorousSumDensity,
densityUpdate = RigorousSumDensity, # VolumeScaledDensity,
HUpdate = IdealH,
filter = 0.0,
volumeType = RKSumVolume,
# Timestep constraints
cfl = 0.5,
deltaPhi = 0.01,
domainIndependent = False,
# Integrator and run time.
IntegratorConstructor = CheapSynchronousRK2Integrator,
steps = None,
goalTime = 10.0,
dt = 0.0001,
dtMin = 1.0e-5,
dtMax = 0.1,
dtGrowth = 2.0,
maxSteps = None,
statsStep = 10,
redistributeStep = None,
restartStep = 500,
restoreCycle = -1,
smoothIters = 0,
rigorousBoundaries = True,
dtverbose = False,
serialDump = False,
serialDumpEach = 100,
histFile = "history.ascii",
writeHistory = False,
historyInterval = 2.0,
clearDirectories = False,
dataDir = "twomat-%i",
outputFile = "None",
comparisonFile = "None",
vizCycle = None,
vizTime = 1.0,
vizMethod = SpheralPointmeshSiloDump.dumpPhysicsState
)
polytropicConstant1 = G0*M0/(3.0*Rc*sqrt(rho0))
polytropicConstant2 = G0*M0/(3.0*Rc*sqrt(rho0*0.5))
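# (These match KeplerianPressureDiskProfile.K below, K = G*M/(3*rc*sqrt(rho0)),
#  evaluated for the two disk densities rho0 and rho0/2.)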
# Decide on our hydro algorithm.
if SVPH:
if ASPH:
HydroConstructor = ASVPHFacetedHydro
else:
HydroConstructor = SVPHFacetedHydro
elif CRKSPH:
if ASPH:
HydroConstructor = ACRKSPHHydro
else:
HydroConstructor = CRKSPHHydro
Qconstructor = LimitedMonaghanGingoldViscosity
else:
if ASPH:
HydroConstructor = ASPHHydro
else:
HydroConstructor = SPHHydro
# Data output info.
dataDir = dataDir % n
viscString = "MG"
if balsaraCorrection:
viscString = "Balsara"
elif boolCullenViscosity:
viscString = "Cullen"
dataDir = os.path.join(dataDir, "CRK=%s-Visc=%s-nPerh=%f-compatible=%s-volume=%s" % (CRKSPH,viscString,nPerh,compatibleEnergy,volumeType))
dataDir = os.path.join(dataDir, "Cl=%f-Cq=%f" % (Cl,Cq))
restartBaseName = "%s/KeplerianDisk-n=%i" % (dataDir,
n)
vizDir = os.path.join(dataDir, "visit")
vizBaseName = "Kepler-disk-2d"
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys
if mpi.rank == 0:
if not os.path.exists(dataDir):
os.makedirs(dataDir)
if not os.path.exists(vizDir):
os.makedirs(vizDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# Define a helper class that knows how to specify our requested radial profiles
# for rho, v, and eps.
#-------------------------------------------------------------------------------
class KeplerianPressureDiskProfile:
def __init__(self,G,M,n,rc,rho0):
self.G = G
self.M = M
self.GM = G*M
self.gamma = (n+1.0)/n
self.rc = rc
self.rho0 = rho0
self.K = G*M/(3.0*rc*sqrt(rho0))
return
def rho(self,r):
a = self.GM*(self.gamma-1.0)/(self.K*self.gamma*sqrt(r**2+self.rc**2))
return pow(a,1.0/(self.gamma-1.0))
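    # (rho(r) above appears to be the polytrope in hydrostatic balance with the
    #  softened point-mass potential phi ~ -GM/sqrt(r**2 + rc**2) -- an inference
    #  from the form of the expression, not stated in the original script.)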
def pressure(self,r):
return self.K*self.rho(r)**self.gamma
def __call__(self,r):
return self.rho(r)
#-------------------------------------------------------------------------------
# Create a polytrope for the equation of state.
#-------------------------------------------------------------------------------
eos1 = PolytropicEquationOfStateMKS(polytropicConstant1,
polytropicIndex, mu)
eos2 = PolytropicEquationOfStateMKS(polytropicConstant2,
polytropicIndex, mu)
#-------------------------------------------------------------------------------
# Create our interpolation kernels -- one for normal hydro interactions, and
# one for use with the artificial viscosity
#-------------------------------------------------------------------------------
WT = TableKernel(KernelConstructor(order), 1000)
WTPi = TableKernel(KernelConstructor(order), 1000)
output('WT')
output('WTPi')
#-------------------------------------------------------------------------------
# Create the NodeList and distribute its nodes.
#-------------------------------------------------------------------------------
diskNodes1 = makeFluidNodeList("diskNodes1", eos1,
hmin = hmin,
hmax = hmax,
hminratio = hminratio,
nPerh = nPerh)
diskNodes2 = makeFluidNodeList("diskNodes2", eos2,
hmin = hmin,
hmax = hmax,
hminratio = hminratio,
nPerh = nPerh)
output("diskNodes1")
output("diskNodes1.hmin")
output("diskNodes1.hmax")
output("diskNodes1.hminratio")
output("diskNodes1.nodesPerSmoothingScale")
#output("diskNodes.epsilonTensile")
#output("diskNodes.nTensile")
#output("diskNodes.XSPH")
# Construct the neighbor object and associate it with the node list.
#neighbor1 = TreeNeighbor(diskNodes1,
# kernelExtent = WT.kernelExtent)
#diskNodes1.registerNeighbor(neighbor1)
#diskNodes2.registerNeighbor(neighbor2)
# Build the radial profile object that knows how to create the keplerian disk
# profile.
diskProfile1 = KeplerianPressureDiskProfile(G0, M0, polytropicIndex, Rc, rho0)
diskProfile2 = KeplerianPressureDiskProfile(G0, M0, polytropicIndex, Rc, rho0*0.5)
# Set node positions, masses, and H's for this domain.
from VoronoiDistributeNodes import distributeNodes2d as distributeNodes
print "Generating node distribution."
generator1 = GenerateNodesMatchingProfile2d(n*0.25, diskProfile1,
rmin = rmin,
rmax = rmax*0.25,
thetaMin = thetaMin,
thetaMax = thetaMax,
nNodePerh = nPerh)
n1 = generator1.globalNumNodes()
generator2 = GenerateNodesMatchingProfile2d(n*0.75, diskProfile2,
rmin = rmax*0.27,
rmax = rmax,
thetaMin = thetaMin,
thetaMax = thetaMax,
nNodePerh = nPerh,
m0 = generator1.m0)
n2 = generator2.globalNumNodes()
print "Distributing nodes amongst processors."
distributeNodes((diskNodes1, generator1),(diskNodes2,generator2))
output('mpi.reduce(diskNodes1.numInternalNodes, mpi.MIN)')
output('mpi.reduce(diskNodes1.numInternalNodes, mpi.MAX)')
output('mpi.reduce(diskNodes1.numInternalNodes, mpi.SUM)')
# Loop over the nodes, and set the specific energies and velocities.
for nodes in [diskNodes1,diskNodes2]:
for i in xrange(nodes.numInternalNodes):
r = nodes.positions()[i].magnitude()
#nodes.specificThermalEnergy()[i] = diskProfile.eps(r)
#-------------------------------------------------------------------------------
# Set an external pressure on the disk equivalent to the pressure at the
# cutoff radius.
#-------------------------------------------------------------------------------
externalPressure = eos2.polytropicConstant*diskProfile2.rho(1.01*rmax)**eos2.gamma_
eos2.externalPressure = externalPressure
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output('db')
output('db.appendNodeList(diskNodes1)')
output('db.appendNodeList(diskNodes2)')
output('db.numNodeLists')
output('db.numFluidNodeLists')
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq)
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
q.epsilon2 = epsilon2
q.negligibleSoundSpeed = negligibleSoundSpeed
q.csMultiplier = csMultiplier
output('q')
output('q.Cl')
output('q.Cq')
output('q.limiter')
output('q.epsilon2')
output('q.negligibleSoundSpeed')
output('q.csMultiplier')
output('q.balsaraShearCorrection')
#-------------------------------------------------------------------------------
# Create the gravity physics object.
#-------------------------------------------------------------------------------
gravity = PointPotential(G0, M0, Rc, R0)
gravity.deltaPotentialFraction = deltaPhi
output("gravity.G")
output("gravity.mass")
output("gravity.coreRadius")
output("gravity.origin")
output("gravity.deltaPotentialFraction")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
if SVPH:
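    # NB: this branch references linearConsistent, fcentroidal, and fcellPressure,
    # which do not appear among the commandLine parameters above; the SVPH path
    # presumably needs them defined before it can run.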
hydro = HydroConstructor(W = WT,
Q = q,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
densityUpdate = densityUpdate,
XSVPH = XSPH,
linearConsistent = linearConsistent,
generateVoid = False,
HUpdate = HUpdate,
fcentroidal = fcentroidal,
fcellPressure = fcellPressure,
xmin = Vector(-2.0, -2.0),
xmax = Vector(3.0, 3.0))
# xmin = Vector(x0 - 0.5*(x2 - x0), y0 - 0.5*(y2 - y0)),
# xmax = Vector(x2 + 0.5*(x2 - x0), y2 + 0.5*(y2 - y0)))
elif CRKSPH:
hydro = HydroConstructor(W = WT,
WPi = WTPi,
Q = q,
filter = filter,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
XSPH = XSPH,
densityUpdate = densityUpdate,
correctionOrder = correctionOrder,
volumeType = volumeType,
HUpdate = HUpdate)
else:
hydro = HydroConstructor(W = WT,
WPi = WTPi,
Q = q,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
gradhCorrection = gradhCorrection,
XSPH = XSPH,
densityUpdate = densityUpdate,
HUpdate = HUpdate,
epsTensile = epsilonTensile,
nTensile = nTensile)
output("hydro")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.cfl")
output("hydro.compatibleEnergyEvolution")
output("hydro.densityUpdate")
output("hydro.HEvolution")
packages = [hydro]
#-------------------------------------------------------------------------------
# Construct the MMRV physics object.
#-------------------------------------------------------------------------------
if boolReduceViscosity:
evolveReducingViscosityMultiplier = MorrisMonaghanReducingViscosity(q,nh,aMin,aMax)
packages.append(evolveReducingViscosityMultiplier)
elif boolCullenViscosity:
evolveCullenViscosityMultiplier = CullenDehnenViscosity(q,WTPi,alphMax,alphMin,betaC,betaD,betaE,fKern,boolHopkinsCorrection)
packages.append(evolveCullenViscosityMultiplier)
#-------------------------------------------------------------------------------
# Construct a time integrator, and add the physics packages.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
integrator.appendPhysicsPackage(gravity)
for p in packages:
    integrator.appendPhysicsPackage(p)
integrator.lastDt = dt
integrator.dtMin = dtMin
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
integrator.domainDecompositionIndependent = domainIndependent
integrator.verbose = dtverbose
integrator.rigorousBoundaries = rigorousBoundaries
# Blago! Currently a problem with periodic boundaries.
integrator.cullGhostNodes = False
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
output("integrator.dtGrowth")
output("integrator.domainDecompositionIndependent")
output("integrator.rigorousBoundaries")
output("integrator.verbose")
#-------------------------------------------------------------------------------
# Build the controller to run the simulation.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
statsStep = statsStep,
redistributeStep = redistributeStep,
restartStep = restartStep,
restartBaseName = restartBaseName,
vizMethod = vizMethod,
vizBaseName = vizBaseName,
vizDir = vizDir,
vizStep = vizCycle,
vizTime = vizTime,
vizDerivs = True,
restoreCycle = restoreCycle)
if serialDump:
dump = sDump([diskNodes1,diskNodes2],dataDir)
control.appendPeriodicWork(dump,serialDumpEach)
output('control')
#-------------------------------------------------------------------------------
# Function to measure the angular momentum and radial coordinate.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Advance to the end time.
#-------------------------------------------------------------------------------
if steps is None:
control.advance(goalTime)
else:
control.step(steps)
if outputFile != "None":
outputFile = os.path.join(dataDir, outputFile)
from SpheralGnuPlotUtilities import multiSort
P1 = ScalarField("pressure",diskNodes1)
P2 = ScalarField("pressure",diskNodes2)
diskNodes1.pressure(P1)
diskNodes2.pressure(P2)
xprof1 = mpi.reduce([x.x for x in diskNodes1.positions().internalValues()], mpi.SUM)
yprof1 = mpi.reduce([y.y for y in diskNodes1.positions().internalValues()], mpi.SUM)
rhoprof1 = mpi.reduce(diskNodes1.massDensity().internalValues(), mpi.SUM)
Pprof1 = mpi.reduce(P1.internalValues(), mpi.SUM)
rprof1 = mpi.reduce([ri.magnitude() for ri in diskNodes1.positions().internalValues()], mpi.SUM)
vx1 = mpi.reduce([v.x for v in diskNodes1.velocity().internalValues()], mpi.SUM)
vy1 = mpi.reduce([v.y for v in diskNodes1.velocity().internalValues()], mpi.SUM)
xprof2 = mpi.reduce([x.x for x in diskNodes2.positions().internalValues()], mpi.SUM)
yprof2 = mpi.reduce([y.y for y in diskNodes2.positions().internalValues()], mpi.SUM)
rhoprof2 = mpi.reduce(diskNodes2.massDensity().internalValues(), mpi.SUM)
Pprof2 = mpi.reduce(P2.internalValues(), mpi.SUM)
rprof2 = mpi.reduce([ri.magnitude() for ri in diskNodes2.positions().internalValues()], mpi.SUM)
vx2 = mpi.reduce([v.x for v in diskNodes2.velocity().internalValues()], mpi.SUM)
vy2 = mpi.reduce([v.y for v in diskNodes2.velocity().internalValues()], mpi.SUM)
    np1 = mpi.reduce(int(diskNodes1.numInternalNodes), mpi.SUM)
    np2 = mpi.reduce(int(diskNodes2.numInternalNodes), mpi.SUM)
    # mpi.reduce returns None on non-root ranks.
    if np1 is None:
        np1 = 0
    if np2 is None:
        np2 = 0
vprof1 = []
vprof2 = []
if mpi.rank == 0:
for i in xrange(np1):
vprof1.append(xprof1[i]*vx1[i]/rprof1[i]+yprof1[i]*vy1[i]/rprof1[i])
for i in xrange(np2):
vprof2.append(xprof2[i]*vx2[i]/rprof2[i]+yprof2[i]*vy2[i]/rprof2[i])
mof = mortonOrderIndices(db)
mo1 = mpi.reduce(mof[0].internalValues(),mpi.SUM)
mo2 = mpi.reduce(mof[1].internalValues(),mpi.SUM)
if mpi.rank == 0:
multiSort(rprof1,mo1,xprof1,yprof1,rhoprof1,Pprof1,vprof1)
multiSort(rprof2,mo2,xprof2,yprof2,rhoprof2,Pprof2,vprof2)
f = open(outputFile, "w")
f.write("r x y rho P v mortonOrder\n")
for (ri, xi, yi, rhoi, Pi, vi, mi) in zip(rprof1,xprof1,yprof1,rhoprof1,Pprof1,vprof1,mo1):
f.write((7*"%16.12e "+"\n") % (ri,xi,yi,rhoi,Pi,vi,mi))
for (ri, xi, yi, rhoi, Pi, vi, mi) in zip(rprof2,xprof2,yprof2,rhoprof2,Pprof2,vprof2,mo2):
f.write((7*"%16.12e "+"\n") % (ri,xi,yi,rhoi,Pi,vi,mi))
f.close()
if comparisonFile != "None":
comparisonFile = os.path.join(dataDir, comparisonFile)
import filecmp
assert filecmp.cmp(outputFile,comparisonFile)
```
#### File: Hydro/Noh/NohAnalyticSolution.py
```python
class NohSolution:
# Constructor
def __init__(self, nDim,
r = None,
nPoints = 101,
gamma = 5.0/3.0,
R0 = 1.0,
rho0 = 1.0,
v0 = -1.0,
h0 = 2.01*0.02):
self.nDim = nDim
self.r = r
self.nPoints = nPoints
self.gamma = gamma
self.R0 = R0
self.rho0 = rho0
self.v0 = v0
self.h0 = h0
return
# Method to provide the analytic solution of the Noh problem at the
# requested time.
def solution(self, time,
r = None):
# If the user has not specified the desired r coordinates, compute
# them evenly between [0, R0].
if r is None:
if self.r is None:
assert self.nPoints > 1
R0 = self.R0 + self.v0*time
dr = R0/(self.nPoints - 1)
r = [i*dr for i in xrange(self.nPoints)]
else:
r = self.r
assert not r is None
# Prepare arrays for the values we're going to compute.
v = []
u = []
rho = []
P = []
h = []
        # The current position of the shock.
rshock = -time/3.0*self.v0
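        # (For gamma = 5/3 the Noh shock speed is |v0|*(gamma - 1)/2 = |v0|/3,
        #  hence the t/3 factor.)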
# Fill in the state values for each value of r.
for ri in r:
if abs(ri) <= rshock:
v.append(0.0)
u.append(0.5*self.v0**2)
rho.append(self.rho0*2.0**(2*self.nDim))
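                # (2**(2*nDim) == ((gamma + 1)/(gamma - 1))**nDim for gamma = 5/3,
                #  the standard Noh post-shock compression.)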
h.append(self.h0*(self.rho0/rho[-1])**(1.0/self.nDim))
else:
v.append(self.v0)
u.append(0.0)
rho.append(self.rho0*(1.0 - self.v0 * time/(abs(ri) + 1.0e-10))**(self.nDim - 1))
h.append(self.h0)
P.append((self.gamma - 1.0)*u[-1]*rho[-1])
return r, v, u, rho, P, h
# For higher dimensional cases, compute the ideal radial and
# tangential smoothing scales.
def hrtsolution(self, time):
# First get the standard solution.
r, v, u, rho, P, h = self.solution(time)
        # The current position of the shock.
        rshock = -time/3.0*self.v0
        # For each position, compute the h's.
hr = []
ht = []
for ri, rhoi in zip(r, rho):
if ri <= rshock:
hs = self.h0*(self.rho0/rhoi)**(1.0/self.nDim)
hr.append(hs)
ht.append(hs)
else:
hr.append(self.h0)
ht.append(self.h0*(self.rho0/rhoi)**(1.0/(self.nDim - 1)))
# Post-conditions.
assert len(hr) == len(r)
assert len(ht) == len(r)
return r, hr, ht
```
#### File: Hydro/Noh/testQ.py
```python
import Gnuplot
import Numeric   # assumed: the old Numeric array package, used for the arrays below
from Spheral import *
################################################################################
def testQlimit(Q, nodes, nodeID):
neighbor = nodes.neighbor
neighbor.setMasterList(nodeID)
neighbor.setRefineNeighborList(nodeID)
pos = nodes.positions
vel = nodes.velocity
DvDx = nodes.DvelocityDx
H = nodes.Hfield
x = Numeric.array([0.0]*neighbor.numRefine)
y = Numeric.array([0.0]*neighbor.numRefine)
vx = Numeric.array([0.0]*neighbor.numRefine)
vy = Numeric.array([0.0]*neighbor.numRefine)
Qvx = Numeric.array([0.0]*neighbor.numRefine)
Qvy = Numeric.array([0.0]*neighbor.numRefine)
velFraction = Numeric.array([0.0]*neighbor.numRefine)
i = 0
for neighborID in neighbor.refineNeighborList:
ri = pos[nodeID]
rj = pos[neighborID]
vi = vel[nodeID]
vj = vel[neighborID]
rij = Vector2d(ri.x - rj.x, ri.y - rj.y)
rijNorm = rij.unitVector()
rji = Vector2d(rj.x - ri.x, rj.y - ri.y)
rjiNorm = rji.unitVector()
vij = Vector2d(vi.x - vj.x, vi.y - vj.y)
sigi = DvDx[nodeID]
sigj = DvDx[neighborID]
Hi = H[nodeID]
Hj = H[neighborID]
DelVeli = (sigi.dotvec(rijNorm)).magnitude() / (Hi.dotvec(rijNorm)).magnitude();
DelVelj = (sigj.dotvec(rijNorm)).magnitude() / (Hj.dotvec(rijNorm)).magnitude();
fi = Q.limitSigma(vi, vj, rij, rijNorm, sigi, sigj);
fj = Q.limitSigma(vj, vi, rji, rjiNorm, sigj, sigi);
dvi = sigi.dotvec(rij)
dvj = sigj.dotvec(rij)
correctedVij = Vector2d(vij.x - 0.5*(fi*dvi.x + fj*dvj.x),
vij.y - 0.5*(fi*dvi.y + fj*dvj.y))
x[i] = rj.x
y[i] = rj.y
vx[i] = -(vij.x)
vy[i] = -(vij.y)
Qvx[i] = -(correctedVij.x)
Qvy[i] = -(correctedVij.y)
velFraction[i] = correctedVij.magnitude() / (vij.magnitude() + 1e-30)
i = i + 1
return x, y, vx, vy, Qvx, Qvy, velFraction
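# (velFraction measures how strongly the limiter damped each pair's relative
#  velocity: the ratio |corrected vij| / |vij|, with 1.0 meaning no limiting.)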
################################################################################
def plotQvij(x, y, vx, vy,
plot = Gnuplot.Gnuplot()):
    data = Gnuplot.Data(x, y, vx, vy,
                        with_ = 'vector')
plot.replot(data)
plot('set size square')
return plot
```
#### File: Hydro/Riemann/RiemannSolution.py
```python
from math import *
import numpy as np
import argparse
# Several standard tests listed as (x0, x1, xdiaph, gamma_gas, out_time, dl, vl, pl, dr, vr, pr)
# The bounds are chosen so that a good comparison can be made in the range x \in [0,1]
Riemann_packaged_problems = {
"sod" : ( 0.0, 1.0, 0.5, 1.4, 0.20, 1.0, 0.0, 1.0, 0.125, 0.0, 0.1), # TEST 1 (Modified Sod)
"123" : ( 0.0, 1.0, 0.5, 1.4, 0.15, 1.0, -2.0, 0.4, 1.0, 2.0, 0.4), # TEST 2 (123 problem)
"leftwc" : ( 0.0, 1.0, 0.5, 1.4, 0.012, 1.0, 0.0, 1000.0, 1.0, 0.0, 0.01), # TEST 3 (Left Woodward & Colella)
"2shock_collision" : (-1.0, 2.0, 0.4, 1.4, 0.035, 5.99924, 19.5975, 460.894, 5.99242, -6.19633, 46.0950), # TEST 4 (Collision of 2 shocks)
"stationary_contact" : (-0.5, 1.5, 0.8, 1.4, 0.012, 1.0, -19.59745, 1000.0, 1.0, -19.59745, 0.01), # TEST 5 (Stationary contact)
"slow_shock" : (-2.0, 8.0, 0.5, 1.4, 1.0, 3.857143, -0.810631, 10.33333, 1.0, -3.44, 1.0), # TEST 6 (Slow shock)
"shock_contact_shock" : (-1.0, 2.0, 0.5, 1.4, 0.3, 1.0, 0.5, 1.0, 1.25, -0.5, 1.0), # TEST 7 (Shock-Contact-Shock)
"leblanc" : ( 0.0, 1.0, 0.3, 1.4, 0.5, 1.0, 0.0, 2.0e-1/3.0, 0.01, 0.0, 2.0e-10/3.0), # TEST 8 (LeBlanc)
}
#-------------------------------------------------------------------------------
# The main object.
#-------------------------------------------------------------------------------
class RiemannSolution:
def __init__(self,
problem = "Sod", # ("", "Sod", "123", "Stationary_contact", "Slow_shock", "Slow_contact_shock", "LeBlanc")
n = 1000, # number of points in evaluating exact solution
x0 = None, # box min coordinate
x1 = None, # box max coordinate
xdiaph = None, # position of diaphragm xdiaph \in [x0, x1]
gamma_gas = None, # ratio of specific heats
out_time = None, # default time of solution
dl = None, # density (left state)
vl = None, # velocity (left state)
pl = None, # pressure (left state)
hl = None, # smoothing scale (left state)
dr = None, # density (right state)
vr = None, # velocity (right state)
pr = None, # pressure (right state)
hr = None): # smoothing scale (right state)
assert problem or (x0 and x1 and out_time and xdiaph and gamma_gas and dl and vl and pl and dr and vr and pr)
# Get the ICs.
if problem:
assert problem.lower() in Riemann_packaged_problems
_x0, _x1, _xdiaph, _gamma_gas, _out_time, _dl, _vl, _pl, _dr, _vr, _pr = Riemann_packaged_problems[problem.lower()]
if x0 is None:
x0 = _x0
if x1 is None:
x1 = _x1
if xdiaph is None:
xdiaph = _xdiaph
if gamma_gas is None:
gamma_gas = _gamma_gas
if out_time is None:
out_time = _out_time
if dl is None:
dl = _dl
if vl is None:
vl = _vl
if pl is None:
pl = _pl
if dr is None:
dr = _dr
if vr is None:
vr = _vr
if pr is None:
pr = _pr
# Store the variables
self.n = n
self.x0 = x0
self.x1 = x1
self.xdiaph = xdiaph
self.gamma_gas = gamma_gas
self.out_time = out_time
self.dl = dl
self.vl = vl
self.pl = pl
self.hl = hl
self.dr = dr
self.vr = vr
self.pr = pr
self.hr = hr
return
#---------------------------------------------------------------------------
# Compute the solution.
#---------------------------------------------------------------------------
def solution(self,
time = None,
x = None):
n = self.n
x0 = self.x0
x1 = self.x1
xdiaph = self.xdiaph
gamma_gas = self.gamma_gas
out_time = self.out_time
dl = self.dl
vl = self.vl
pl = self.pl
hl = self.hl
dr = self.dr
vr = self.vr
pr = self.pr
hr = self.hr
# Solution time
if time is not None:
out_time = time
else:
out_time = self.out_time
# Sampling positions
if x is None:
assert n > 0
assert x1 > x0
x = np.linspace(x0, x1, n)
else:
n = len(x)
# Did we get the initial (left, right) h?
if hl is None:
hl = x[1] - x[0]
if hr is None:
hr = x[-1] - x[-2]
assert hl > 0 and hr > 0
# compute gamma related constants
g1 = (gamma_gas - 1.0)/(2.0*gamma_gas)
g2 = (gamma_gas + 1.0)/(2.0*gamma_gas)
g3 = 2.0*gamma_gas/(gamma_gas - 1.0)
g4 = 2.0/(gamma_gas - 1.0)
g5 = 2.0/(gamma_gas + 1.0)
g6 = (gamma_gas - 1.0)/(gamma_gas + 1.0)
g7 = (gamma_gas - 1.0)/2.0
g8 = gamma_gas - 1.0
# compute sound speeds
cl = sqrt(gamma_gas*pl/dl)
cr = sqrt(gamma_gas*pr/dr)
#---------------------------------------------------------------------------
# purpose: to provide a guessed value for pressure
# pm in the Star Region. The choice is made
# according to adaptive Riemann solver using
# the PVRS, TRRS and TSRS approximate
# Riemann solvers. See Sect. 9.5 of Chapt. 9 of Ref. 1
#---------------------------------------------------------------------------
def guessp():
quser = 2.0
# compute guess pressure from PVRS Riemann solver
cup = 0.25*(dl + dr)*(cl + cr)
ppv = 0.5*(pl + pr) + 0.5*(vl - vr)*cup
ppv = max(0.0, ppv)
pmin = min(pl, pr)
pmax = max(pl, pr)
qmax = pmax/pmin
if (qmax <= quser and (pmin <= ppv and ppv <= pmax)):
pm = ppv # select PVRS Riemann solver
else:
if (ppv < pmin):
# select Two-Rarefaction Riemann solver
pq = pow(pl/pr, g1)
vm = (pq*vl/cl + vr/cr + g4*(pq - 1.0))/(pq/cl + 1.0/cr)
ptl = 1.0 + g7*(vl - vm)/cl
ptr = 1.0 + g7*(vm - vr)/cr
pm = 0.5*(pow(pl*ptl, g3) + pow(pr*ptr, g3))
else:
# select Two-Shock Riemann solver with PVRS as estimate
gel = sqrt((g5/dl)/(g6*pl + ppv))
ger = sqrt((g5/dr)/(g6*pr + ppv))
pm = (gel*pl + ger*pr - (vr - vl))/(gel + ger)
return pm
#---------------------------------------------------------------------------
# purpose: to evaluate the pressure functions
# fl and fr in exact Riemann solver
# and their first derivatives
#---------------------------------------------------------------------------
def prefun(p, dk, pk, ck):
if (p <= pk):
# rarefaction wave
pratio = p/pk
f = g4*ck*(pow(pratio, g1) - 1.0)
fd = (1.0/(dk*ck))*pow(pratio, -g2)
else:
# shock wave
ak = g5/dk
bk = g6*pk
qrt = sqrt(ak/(bk + p))
f = (p - pk)*qrt
fd = (1.0 - 0.5*(p - pk)/(bk + p))*qrt
return f, fd
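# Note: prefun returns (f(p), f'(p)) for one side of the tube; the p <= pk
# branch comes from the isentropic rarefaction relations, the p > pk branch
# from the shock jump conditions, feeding the Newton iteration in starpu.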
#---------------------------------------------------------------------------
# purpose: to compute the solution for pressure and
# velocity in the Star Region
#---------------------------------------------------------------------------
def starpu(pscale):
nriter = 20
tolpre = 1.0e-6
# guessed value pstart is computed
pstart = guessp()
pold = pstart
udiff = vr - vl
print ("----------------------------------------\n"
" Iteration number Change\n"
"----------------------------------------")
i = 1
change = 10.0*tolpre
while i <= nriter and change > tolpre:
fl, fld = prefun(pold, dl, pl, cl)
fr, frd = prefun(pold, dr, pr, cr)
p = pold - (fl + fr + udiff)/(fld + frd)
change = 2.0*abs((p - pold)/(p + pold))
print '\t', i, "\t\t", change
if (p < 0.0):
p = tolpre
pold = p
i += 1
if (i > nriter):
print "divergence in Newton-Raphson iteration"
# compute velocity in star region
u = 0.5*(vl + vr + fr - fl)
print "----------------------------------------\n" \
" Pressure Velocity\n" \
"----------------------------------------\n" \
" ", p/pscale, "\t\t", u, '\n' \
"----------------------------------------"
return p, u
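# Note: starpu solves fl(p) + fr(p) + (vr - vl) = 0 for the star-region
# pressure by Newton-Raphson (prefun supplies f and f'), then recovers the
# star velocity as u = 0.5*(vl + vr + fr - fl).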
#---------------------------------------------------------------------------
# purpose: to sample the solution throughout the wave
# pattern. Pressure pm and velocity vm in the
# star region are known. Sampling is performed
# in terms of the 'speed' s = x/t. Sampled
# values are d, v, p
#---------------------------------------------------------------------------
def sample(pm, vm, s):
if (s <= vm):
# sampling point lies to the left of the contact discontinuity
if (pm <= pl):
# left rarefaction
shl = vl - cl
if (s <= shl):
# sampled point is left data state
d = dl
v = vl
p = pl
h = hl
else:
cml = cl*pow(pm/pl, g1)
stl = vm - cml
if (s > stl):
# sampled point is star left state
d = dl*pow(pm/pl, 1.0/gamma_gas)
v = vm
p = pm
h = hl*dl/d
else:
# sampled point is inside left fan
v = g5*(cl + g7*vl + s)
c = g5*(cl + g7*(vl - s))
d = dl*pow(c/cl, g4)
p = pl*pow(c/cl, g3)
h = hl*dl/d
else:
# left shock
pml = pm/pl
sl = vl - cl*sqrt(g2*pml + g1)
if (s <= sl):
# sampled point is left data state
d = dl
v = vl
p = pl
h = hl
else:
# sampled point is star left state
d = dl*(pml + g6)/(pml*g6 + 1.0)
v = vm
p = pm
h = hl*dl/d
else:
# sampling point lies to the right of the contact discontinuity
if (pm > pr):
# right shock
pmr = pm/pr
sr = vr + cr*sqrt(g2*pmr + g1)
if (s >= sr):
# sampled point is right data state
d = dr
v = vr
p = pr
h = hr
else:
# sampled point is star right state
d = dr*(pmr + g6)/(pmr*g6 + 1.0)
v = vm
p = pm
h = hr*dr/d
else:
# right rarefaction
shr = vr + cr
if (s >= shr):
# sampled point is right data state
d = dr
v = vr
p = pr
h = hr
else:
cmr = cr*pow(pm/pr, g1)
str_ = vm + cmr   # renamed from 'str' to avoid shadowing the builtin
if (s <= str_):
# sampled point is star right state
d = dr*pow(pm/pr, 1.0/gamma_gas)
v = vm
p = pm
h = hr*dr/d
else:
# sampled point is inside right fan
v = g5*(-cr + g7*vr + s)
c = g5*(cr - g7*(vr - s))
d = dr*pow(c/cr, g4)
p = pr*pow(c/cr, g3)
h = hr*dr/d
return d, v, p, h
# the pressure positivity condition is tested for
if (g4*(cl+cr) <= (vr-vl)):
raise RuntimeError("the initial data is such that vacuum is generated"
"\nstopping program")
# exact solution for pressure and velocity in star region is found
pm, vm = starpu(1.0)
# complete solution at time out_time is found
d = np.empty(n)
v = np.empty(n)
p = np.empty(n)
eps = np.empty(n)
A = np.empty(n)
h = np.empty(n)
for i in xrange(n):
s = (x[i] - xdiaph)/max(1e-10, out_time)
ds, vs, ps, hs = sample(pm, vm, s)
d[i] = ds
v[i] = vs
p[i] = ps
eps[i] = ps/(g8*ds)
A[i] = ps/pow(ds, gamma_gas)
h[i] = hs
return x, v, eps, d, p, A, h
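# Example usage (a sketch):
#   sol = RiemannSolution(problem = "sod", n = 200)
#   x, v, eps, d, p, A, h = sol.solution()
#   # d, v, p hold the exact density, velocity, and pressure profiles at out_time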
#-------------------------------------------------------------------------------
# Provide a way to call this script as a standalone executable.
#-------------------------------------------------------------------------------
if __name__ == "__main__":
ap = argparse.ArgumentParser(description = "Compute the Riemann solution, with optional output to a file or plotted to the screen.")
ap.add_argument("--problem",
default = "Sod",
help = """
Use one of the canned Riemann initial conditions: (Sod, 123, LeftWC, 2shock_collision, Stationary_contact, Slow_shock, Shock_contact_shock, LeBlanc).
If specified as the empty string "" (or None), the full state must be specified explicitly.""")
ap.add_argument("--n",
default = 1000,
type = int,
help = "Number of points to generate in the solution.")
ap.add_argument("--x0",
default = None,
type = float,
help = "Minimum spatial coordinate in the tube.")
ap.add_argument("--x1",
default = None,
type = float,
help = "Maximum spatial coordinate in the tube.")
ap.add_argument("--xdiaph",
default = None,
type = float,
help = "Coordinate of the diaphragm.")
ap.add_argument("--gamma_gas",
default = None,
type = float,
help = "Ratio of specific heats.")
ap.add_argument("--out_time",
default = None,
type = float,
help = "Solution time.")
ap.add_argument("--dl",
default = None,
type = float,
help = "Initial density for left state.")
ap.add_argument("--vl",
default = None,
type = float,
help = "Initial velocity for left state.")
ap.add_argument("--pl",
default = None,
type = float,
help = "Initial pressure for left state.")
ap.add_argument("--hl",
default = None,
type = float,
help = "Initial smoothing scale for left state.")
ap.add_argument("--dr",
default = None,
type = float,
help = "Initial density for right state.")
ap.add_argument("--vr",
default = None,
type = float,
help = "Initial velocity for right state.")
ap.add_argument("--pr",
default = None,
type = float,
help = "Initial pressure for right state.")
ap.add_argument("--hr",
default = None,
type = float,
help = "Initial smoothing scale for right state.")
ap.add_argument("--file",
default = None,
help = "Write profiles to given file.")
ap.add_argument("--noheader",
action = "store_true",
help = "Do not write a header at the top of the output file.")
ap.add_argument("--plot",
action = "store_true",
help = "Plot the profiles to the screen.")
ap.add_argument("--plotsize",
default = 10,
type = float,
help = "Set the size of the figure (in inches) when plotting.")
args = ap.parse_args()
globals().update(vars(args))
# Compute the solution.
answer = RiemannSolution(problem = problem,
n = n,
x0 = x0,
x1 = x1,
xdiaph = xdiaph,
gamma_gas = gamma_gas,
out_time = out_time,
dl = dl,
vl = vl,
pl = pl,
hl = hl,
dr = dr,
vr = vr,
pr = pr,
hr = hr)
x, v, eps, d, p, A, h = answer.solution(time = out_time)
# Write the output to a text file.
if file:
with open(file, "w") as f:
# Write a header
if not noheader:
f.write(
"""# Output from RiemannSolution using the arguments:
# problem = %(problem)s
# n = %(n)s
# x0 = %(x0)s
# x1 = %(x1)s
# xdiaph = %(xdiaph)s
# gamma_gas = %(gamma_gas)s
# out_time = %(out_time)s
# dl, vl, pl, hl = %(dl)s, %(vl)s, %(pl)s, %(hl)s
# dr, vr, pr, hr = %(dr)s, %(vr)s, %(pr)s, %(hr)s
#
# x rho vel P eps A h
""" % {"problem" : problem,
"n" : n,
"x0" : x0,
"x1" : x1,
"xdiaph" : xdiaph,
"gamma_gas" : gamma_gas,
"out_time" : out_time,
"dl" : dl,
"vl" : vl,
"pl" : pl,
"hl" : hl,
"dr" : dr,
"vr" : vr,
"pr" : pr,
"hr" : hr})
for xi, di, vi, pi, epsi, Ai, hi in zip(x, d, v, p, eps, A, h):
f.write((7*"%20.17e ") % (xi, di, vi, pi, epsi, Ai, hi) + "\n")
# Plot the results to the screen via matplotlib (if available)
if plot:
try:
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(plotsize, 2.0/3.0*plotsize))
axes = []
for i, (q, label) in enumerate([(d, "Density"),
(v, "Velocity"),
(p, "Pressure"),
(eps, "Specific Thermal Energy"),
(A, "Entropy"),
(h, "Smoothing scale")]):
axes.append(fig.add_subplot(2, 3, i + 1))
plt.plot(x, q, linewidth=3)
plt.title(label)
qmin = min(q)
qmax = max(q)
qdiff = qmax - qmin
axes[i].set_ylim(qmin - 0.1*qdiff, qmax + 0.1*qdiff)
plt.show()
except ImportError:
print "ERROR: unable to import matplotlib for graphics."
```
#### File: Hydro/Sedov/SedovAnalyticSolution.py
```python
from math import *
import bisect
def sgn(x):
if x < 0:
return -1
else:
return 1
#-------------------------------------------------------------------------------
# Numerical integration routines, based on Numerical recipes.
#-------------------------------------------------------------------------------
def trapzd(func, a, b, s, n):
assert b >= a
assert n >= 1
if n == 1:
return 0.5*(b - a)*(func(a) + func(b))
else:
it = 2**(n - 2)
delta = (b - a)/it
result = 0.0
for j in xrange(it):
result += func(a + (j + 0.5)*delta)
return 0.5*(s + (b - a)*result/it)
def polint(xa, ya, x):
n = len(xa)
assert len(ya) == n
c = ya[:]
d = ya[:]
ns = 0
dif = abs(x - xa[0])
for i in xrange(1, n):
dift = abs(x - xa[i])
if dift < dif:
ns = i
dif = dift
y = ya[ns]
ns -= 1
for m in xrange(1, n):  # n points support n-1 extrapolation levels; the original loop stopped one level short
for i in xrange(n - m):
ho = xa[i] - x
hp = xa[i + m] - x
w = c[i + 1] - d[i]
den = ho - hp
if den == 0.0:
raise "Failure in polint"
den = w/den
d[i] = hp*den
c[i] = ho*den
if 2*ns < n - m - 1:
dy = c[ns + 1]
else:
dy = d[ns]
ns -= 1
y += dy
return y, dy
def qromb(func, a, b,
eps = 1.0e-10,
maxIters = 50,
K = 5):
KM = K - 1
h = [0.0]*(maxIters + 1)
s = [0.0]*(maxIters + 1)
h[0] = 1.0
for j in xrange(maxIters):
jj = j + 1
s[j] = trapzd(func, a, b, s[j], jj)
if jj >= K:
ss, dss = polint(h[j - KM:jj], s[j - KM:jj], 0.0)
if abs(dss) <= eps*abs(ss):
return ss
s[jj] = s[j]
h[jj] = 0.25*h[j]
raise "Too many iterations in qromb"
#-------------------------------------------------------------------------------
# SedovSolution : Main class
#-------------------------------------------------------------------------------
class SedovSolution:
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self,
nDim,
gamma = 1.4,
rho0 = 1.0,
E0 = 1.0,
h0 = 2.01*0.02,
nbins = 10001,
accuracy = 1e-6):
self.nu = nDim
self.gamma = gamma
self.rho0 = rho0
self.E0 = E0
self.h0 = h0
self.nbins = nbins
self.accuracy = accuracy
# Store the alpha exponent constants.
self.a1 = self.computeAlpha1(self.nu, self.gamma)
self.a2 = self.computeAlpha2(self.nu, self.gamma)
self.a3 = self.computeAlpha3(self.nu, self.gamma)
self.a4 = self.computeAlpha4(self.nu, self.gamma)
self.a5 = self.computeAlpha5(self.nu, self.gamma)
self.a6 = self.computeAlpha6(self.nu, self.gamma)
self.a7 = self.computeAlpha7(self.nu, self.gamma)
# Compute an internal table with the dimensionless solution.
self._lam, self._vlam, self._rholam, self._Plam = \
self.computeDimensionlessTable(self.nu, self.gamma, nbins)
# Compute the energy alpha constant.
self.alpha = self.computeAlpha(self.accuracy)  # computeAlpha takes the integration tolerance, not a bin count
assert self.alpha > 0.0
# Compute E
self.E = self.E0/self.alpha
return
#---------------------------------------------------------------------------
# Properties at the shock front.
#---------------------------------------------------------------------------
def shockState(self, t):
gamma = self.gamma
nu = self.nu
E = self.E
rho1 = self.rho0
nu1 = 1.0/(nu + 2.0)
nu2 = 2.0*nu1
t1 = t**nu1
t2 = t**nu2
vs = nu2 * (E/rho1)**nu1
if t != 0.0:
vs /= t**(nu/(nu + 2.0))
r2 = (E/rho1)**nu1 * t2
v2 = 2.0/(gamma + 1.0)*vs
rho2 = (gamma + 1.0)/(gamma - 1.0)*rho1
P2 = 2.0/(gamma + 1)*rho1*vs*vs
return vs, r2, v2, rho2, P2
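# Note: r2 above is the classic Sedov-Taylor scaling
# r2 = (E/rho0)**(1/(nu+2)) * t**(2/(nu+2)) with vs = dr2/dt, while v2, rho2,
# and P2 are the strong-shock Rankine-Hugoniot post-shock values.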
#---------------------------------------------------------------------------
# Compute the solution at the given positions.
#---------------------------------------------------------------------------
def solution(self, t,
r = None):
gamma = self.gamma
gam1 = gamma - 1.0
nu = self.nu
vs, r2, v2, rho2, P2 = self.shockState(t)
if r is None:
r = [0.01*r2*i for i in xrange(101)]
v = []
rho = []
P = []
u = []
h = []
A = []
for ri in r:
if abs(ri) < r2:
vi, rhoi, Pi = self.lookupSolution(abs(ri/r2))
else:
vi, rhoi, Pi = 0.0, self.rho0/rho2, 0.0
v.append(vi*v2*sgn(ri))
rho.append(rhoi*rho2)
P.append(Pi*P2)
u.append(P[-1]/(gam1*rho[-1] + 1.0e-50))
h.append(self.h0 * self.rho0/(rho[-1] + 1.0e-50))
A.append(P[-1]/max(1.0e-30, rho[-1])**gamma)
return r, v, u, rho, P, A, h
#---------------------------------------------------------------------------
# alpha1
#---------------------------------------------------------------------------
def computeAlpha1(self, nu, gamma):
return (((nu + 2.0)*gamma)/(2.0 + nu*(gamma - 1.0)) *
(2.0*nu*(2.0 - gamma)/(gamma*(nu + 2)**2) - self.computeAlpha2(nu, gamma)))
#---------------------------------------------------------------------------
# alpha2
#---------------------------------------------------------------------------
def computeAlpha2(self, nu, gamma):
return (1.0 - gamma)/(2.0*(gamma - 1.0) + nu)
#---------------------------------------------------------------------------
# alpha3
#---------------------------------------------------------------------------
def computeAlpha3(self, nu, gamma):
return nu/(2.0*(gamma - 1.0) + nu)
#---------------------------------------------------------------------------
# alpha4
#---------------------------------------------------------------------------
def computeAlpha4(self, nu, gamma):
return self.computeAlpha1(nu, gamma)*(nu + 2.0)/(2.0 - gamma)
#---------------------------------------------------------------------------
# alpha5
#---------------------------------------------------------------------------
def computeAlpha5(self, nu, gamma):
return 2.0/(gamma - 2.0)
#---------------------------------------------------------------------------
# alpha6
#---------------------------------------------------------------------------
def computeAlpha6(self, nu, gamma):
return gamma/(2.0*(gamma - 1.0) + nu)
#---------------------------------------------------------------------------
# alpha7
#---------------------------------------------------------------------------
def computeAlpha7(self, nu, gamma):
return (2.0 + nu*(gamma - 1))*self.computeAlpha1(nu, gamma)/(nu*(2.0 - gamma))
#---------------------------------------------------------------------------
# lam (r/r2)
#---------------------------------------------------------------------------
def lam(self, V):
nu = self.nu
gamma = self.gamma
a1 = self.a1
a2 = self.a2
return ((0.25*(nu + 2.0)*(gamma + 1)*V)**(-2.0/(2.0 + nu)) *
((gamma + 1)/(gamma - 1.0) * (0.5*(nu + 2.0)*gamma*V - 1.0))**(-a2) *
((nu + 2.0)*(gamma + 1.0)/((nu + 2.0)*(gamma + 1.0) - 2.0*(2.0 + nu*(gamma - 1.0))) *
(1.0 - 0.5*(2.0 + nu*(gamma - 1.0))*V))**(-a1)
)
#---------------------------------------------------------------------------
# vlambda (v/v2)
#---------------------------------------------------------------------------
def vlambda(self, V):
nu = self.nu
gamma = self.gamma
return 0.25*(nu + 2.0)*(gamma + 1.0) * V * self.lam(V)
#---------------------------------------------------------------------------
# rholambda (rho/rho2)
#---------------------------------------------------------------------------
def rholambda(self, V):
nu = self.nu
gamma = self.gamma
a3 = self.a3
a4 = self.a4
a5 = self.a5
return (((gamma + 1.0)/(gamma - 1.0)*(0.5*(nu + 2.0)*gamma*V - 1.0))**a3 *
((gamma + 1.0)/(gamma - 1.0)*(1.0 - 0.5*(nu + 2.0)*V))**a5 *
((nu + 2.0)*(gamma + 1.0)/((2.0 + nu)*(gamma + 1.0) - 2.0*(2.0 + nu*(gamma - 1.0))) *
(1.0 - 0.5*(2.0 + nu*(gamma - 1.0))*V))**a4)
#---------------------------------------------------------------------------
# Plambda (P/P2)
#---------------------------------------------------------------------------
def Plambda(self, V):
nu = self.nu
gamma = self.gamma
a1 = self.a1
a4 = self.a4
a5 = self.a5
return ((0.25*(nu + 2.0)*(gamma + 1.0)*V)**(2.0*nu/(2.0 + nu)) *
((gamma + 1.0)/(gamma - 1.0)*(1.0 - 0.5*(nu + 2.0)*V))**(a5 + 1.0) *
((nu + 2.0)*(gamma + 1.0)/((nu + 2.0)*(gamma + 1.0) - 2.0*(2.0 + nu*(gamma - 1.0))) *
(1.0 - 0.5*(2.0 + nu*(gamma - 1.0))*V))**(a4 - 2.0*a1))
#---------------------------------------------------------------------------
# The range of the dimensionless velocity variable V.
#---------------------------------------------------------------------------
def Vrange(self, nu, gamma):
assert gamma > 1.0
assert nu in (1, 2, 3)
if (nu in (1, 2)) or (nu == 3 and gamma < 7.0):
return (2.0/((nu + 2.0)*gamma), 4.0/((nu + 2.0)*(gamma + 1.0)))
else:
return (4.0/(5.0*(gamma + 1.0)), 2.0/5.0)
#---------------------------------------------------------------------------
# The dimension dependent volume constant.
#---------------------------------------------------------------------------
def Anu(self, nu):
if nu == 1:
return 2.0
elif nu == 2:
return 2.0*pi
elif nu == 3:
return 4.0*pi
else:
assert False
#---------------------------------------------------------------------------
# Compute a tabular form of the dimensionless solution.
#---------------------------------------------------------------------------
def computeDimensionlessTable(self, nu, gamma, nbins):
assert nbins > 1
gam1 = gamma - 1.0
Vmin, Vmax = self.Vrange(nu, gamma)
dV = (Vmax - Vmin)/(nbins - 1)
lam = []
vlam = []
rholam = []
Plam = []
for i in xrange(nbins):
V = Vmin + i*dV
lam.append(self.lam(V))
vlam.append(self.vlambda(V))
rholam.append(self.rholambda(V))
Plam.append(self.Plambda(V))
assert len(lam) == nbins
assert len(vlam) == nbins
assert len(rholam) == nbins
assert len(Plam) == nbins
assert all(0.0 <= x <= 1.0 for x in lam)
assert lam[0] < 1.0e-10 and lam[-1] > 1.0 - 1.0e-10
lam[0] = 0.0
lam[-1] = 1.0
return lam, vlam, rholam, Plam
#---------------------------------------------------------------------------
# Perform a linear interpolation into the given table.
#---------------------------------------------------------------------------
def interp(self, x, xtab, ytab, imin, imax):
return ytab[imin] + ((ytab[imax] - ytab[imin])/
(xtab[imax] - xtab[imin] + 1.0e-50)*
(x - xtab[imin]))
#---------------------------------------------------------------------------
# Interpolate for the solution at a given lambda \in [0,1].
#---------------------------------------------------------------------------
def lookupSolution(self, x):
assert x >= 0.0 and x <= 1.0
# Bracket this x in the table.
imin = max(0, min(self.nbins - 1, bisect.bisect(self._lam, x) - 1))
imax = min(imin + 1, self.nbins - 1)
assert imin >= 0 and imin < self.nbins
assert imax >= 0 and imax < self.nbins
# Now we can interpolate the values.
v = self.interp(x, self._lam, self._vlam, imin, imax)
rho = self.interp(x, self._lam, self._rholam, imin, imax)
P = self.interp(x, self._lam, self._Plam, imin, imax)
return v, rho, P
#---------------------------------------------------------------------------
# Numerically integrate the alpha constant.
#---------------------------------------------------------------------------
def computeAlpha(self, accuracy):
assert accuracy > 0.0
gamma = self.gamma
nu = self.nu
Anu = self.Anu(nu)
thpt = qromb(self.func, 0.0, 1.0, accuracy)
return 8.0*Anu/((gamma - 1.0)*(gamma + 1.0)*(nu + 2.0)**2) * thpt
#---------------------------------------------------------------------------
# The integrand function for integrating alpha
#---------------------------------------------------------------------------
def func(self, x):
assert x >= 0.0 and x <= 1.0
v0, rho0, P0 = self.lookupSolution(x)
return (rho0*v0**2 + P0) * x**(self.nu - 1)
## #---------------------------------------------------------------------------
## # Numerically integrate the alpha constant.
## #---------------------------------------------------------------------------
## def computeAlpha(self, nbins, f):
## assert nbins > 1
## nu = self.nu
## nu1 = nu - 1
## gamma = self.gamma
## Anu = self.Anu(nu)
## # First figure out our geometric ratio sizing base.
## fsum = 0.0
## for i in xrange(nbins):
## fsum += f**i
## assert fsum > 0.0
## dlambda0 = 1.0/fsum
## result = 0.0
## lambdasum = 0.0
## for i in xrange(nbins):
## dlambdai = dlambda0 * f**i
## lam0 = lambdasum
## lam1 = min(1.0, lambdasum + dlambdai)
## assert lam0 >= 0.0 and lam0 <= 1.0
## assert lam1 >= 0.0 and lam1 <= 1.0
## v0, rho0, P0 = self.lookupSolution(lam0)
## v1, rho1, P1 = self.lookupSolution(lam0)
## val0 = (rho0*v0**2 + P0)*lam0**nu1
## val1 = (rho1*v1**2 + P1)*lam1**nu1
## result += 0.5*(val0 + val1)*(lam1 - lam0)
## lambdasum = lam1
## assert abs(1.0 - lambdasum) < 1.0e-10
## result *= 8.0*Anu/((gamma - 1.0)*(gamma + 1.0)*(nu + 2.0)**2)
## return result # 1.1*result
## #---------------------------------------------------------------------------
## # Numerically integrate the alpha constant.
## #---------------------------------------------------------------------------
## def computeAlpha(self, nbins):
## assert nbins > 1
## nu = self.nu
## nu1 = nu - 1
## gamma = self.gamma
## Anu = self.Anu(nu)
## Vmin, Vmax = self.Vrange(nu, gamma)
## dV = (Vmax - Vmin)/nbins
## result = 0.0
## for i in xrange(nbins):
## V0 = Vmin + i*dV
## V1 = min(Vmax, Vmin + (i + 1)*dV)
## assert V0 >= Vmin and V0 <= Vmax
## assert V1 >= Vmin and V1 <= Vmax
## lam0 = self.lam(V0)
## lam1 = self.lam(V1)
## dlambda = lam1 - lam0
## assert dlambda > 0.0
## val0 = (self.rholambda(V0)*self.vlambda(V0)**2 + self.Plambda(V0))*lam0**nu1
## val1 = (self.rholambda(V1)*self.vlambda(V1)**2 + self.Plambda(V1))*lam1**nu1
## result += 0.5*(val0 + val1)*dlambda
## result *= 8.0*Anu/((gamma - 1.0)*(gamma + 1.0)*(nu + 2.0)**2)
## return result
```
#### File: MHD/Dedner-divB/Dedner-divB.py
```python
from math import *
from Spheral import *
from SpheralTestUtilities import *
from SpheralVisitDump import dumpPhysicsState
from findLastRestart import *
# Load the mpi module if we're parallel.
import loadmpi
mpi, procID, numProcs = loadmpi.loadmpi()
from GenerateNodeDistribution3d import *
from CubicNodeGenerator import GenerateCubicNodeDistribution
title("Dedner magnetic divergence test")
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(seed = "lattice",
n = 20,
rho0 = 1.0,
V0 = Vector3d(1.0, 1.0, 0.0),
Bz = 1.0/sqrt(4*pi),
P0 = 6.0,
nPerh = 1.3,
mu0 = 1.0,
gamma = 5.0/3.0,
r0 = 1.0/sqrt(8),
divBCleaner = 'none',
mu = 1.0,
Qlimiter = True,
balsaraCorrection = False,
epsilon2 = 1e-2,
negligibleSoundSpeed = 1e-5,
csMultiplier = 1e-4,
hmin = 1e-5,
hmax = 1.0,
hminratio = 0.05,
HsmoothFraction = 0.0,
cfl = 0.25,
XSPH = True,
epsilonTensile = 0.0,
nTensile = 8,
HEvolution = Hydro3d.HEvolutionType.IdealH,
compatibleEnergy = False,
gradhCorrection = True,
limitIdealH = False,
neighborSearchType = Neighbor3d.NeighborSearchType.GatherScatter,
numGridLevels = 20,
topGridCellSize = 2.0,
origin = Vector3d(0.0, 0.0, 0.0),
goalTime = 1.0,
maxSteps = None,
statsStep = 10,
smoothIters = 0,
sumForMassDensity = Hydro3d.MassDensityType.RigorousSumDensity,
restoreCycle = None,
graphics = False,
)
def plotField(x, F, titleStr, filename):
import pylab as p
import griddata as g
import numpy
p.ion()
p.clf()
xhat = Vector3d(1, 0, 0)
yhat = Vector3d(0, 1, 0)
numInternalNodes = len(x.internalValues())
indices = [i for i in xrange(numInternalNodes) if abs(x[i].z) < 1e-8]
xs = numpy.array([x[i].dot(xhat) for i in indices])
ys = numpy.array([x[i].dot(yhat) for i in indices])
x1 = p.linspace(-0.5, 1.5, 50)
y1 = p.linspace(-0.5, 1.5, 50)
xg, yg = p.meshgrid(x1, y1)
if isinstance(F, VectorField3d) or isinstance(F[0], Vector3d):
Fxs = numpy.array([F[i].dot(xhat) for i in indices])
Fys = numpy.array([F[i].dot(yhat) for i in indices])
Fxg = g.griddata(xs, ys, Fxs, xg, yg)
Fyg = g.griddata(xs, ys, Fys, xg, yg)
p.quiver(xg, yg, Fxg, Fyg)
else:
# levels = [0.1*i for i in xrange(32)]
Fs = numpy.array([F[i] for i in indices])
Fg = g.griddata(xs, ys, Fs, xg, yg)
p.contour(xg, yg, Fg, 30)
p.colorbar()
p.title(titleStr)
p.savefig(filename)
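# Example (a sketch): plotField renders a vector field as a quiver plot and a
# scalar field as contours, sampled near the z = 0 plane, e.g.
#   plotField(x, B, 'B field', 'B.png')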
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel3d(BSplineKernel3d(), 1000)
WTPi = TableKernel3d(BSplineKernel3d(), 1000)
output("WT")
output("WTPi")
kernelExtent = WT.kernelExtent()
#-------------------------------------------------------------------------------
# A few derived variables.
#-------------------------------------------------------------------------------
nx = ny = n
nz = int(2 * 2 * kernelExtent * nPerh)
nzx = 1.0*nz/nx
xmin = (-0.5, -0.5, -0.5*nzx)
xmax = (1.5, 1.5, 1.5*nzx)
u0 = P0 / ((gamma-1.0)*rho0)
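# u0 above is the specific thermal energy of a gamma-law gas, u = P/((gamma - 1)*rho).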
dataDir = "Dedner-divB-%ix%ix%i" % (n, n, n)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS3d(gamma, mu)
#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
# Perfectly conducting node list.
nodes = ConductingFluidNodeList("nodes", eos, WT, WTPi)
output("nodes")
nodes.HsmoothFraction = HsmoothFraction
nodes.XSPH = XSPH
nodes.nodesPerSmoothingScale = nPerh
nodes.epsilonTensile = epsilonTensile
nodes.nTensile = nTensile
nodes.hmin = hmin
nodes.hmax = hmax
nodes.hminratio = hminratio
output("nodes.HsmoothFraction")
output("nodes.nodesPerSmoothingScale")
output("nodes.epsilonTensile")
output("nodes.nTensile")
output("nodes.XSPH")
output("nodes.hmin")
output("nodes.hmax")
output("nodes.hminratio")
#-------------------------------------------------------------------------------
# Construct the neighbor object.
#-------------------------------------------------------------------------------
neighbor1 = NestedGridNeighbor3d(nodes,
neighborSearchType,
numGridLevels,
topGridCellSize,
origin,
kernelExtent)
nodes.registerNeighbor(neighbor1)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
x = nodes.positions()
v = nodes.velocity()
B = nodes.magneticInduction()
if restoreCycle is None:
from ParMETISDistributeNodes import distributeNodes3d
generator = GenerateNodeDistribution3d(nx, ny, nz, rho0, seed,
xmin = xmin,
xmax = xmax,
nNodePerh = nPerh,
SPH = True)
distributeNodes3d((nodes, generator))
output("mpi.reduce(nodes.numInternalNodes, mpi.MIN)")
output("mpi.reduce(nodes.numInternalNodes, mpi.MAX)")
output("mpi.reduce(nodes.numInternalNodes, mpi.SUM)")
# Set node specific thermal energies
nodes.specificThermalEnergy(ScalarField3d("tmp", nodes, u0))
# Set nodal magnetic inductions.
r = [sqrt(xi.x**2 + xi.y**2) for xi in x.internalValues()]
for nodeID in xrange(nodes.numInternalNodes):
ri = r[nodeID]/r0
if ri < 1.0:
Bx = (ri**8 - 2*ri**4 + 1)/sqrt(4*pi)
else:
Bx = 0.0
B[nodeID] = Vector3d(Bx, 0, Bz)
v[nodeID] = V0
# Plot the B field configuration "before."
#plotField(x, B, 'B before div cleaning', 'B-before.png')
# plotField(x, [Bi.x for Bi in B.internalValues()], 'Bx before div cleaning', 'Bx-before.png')
# Jot down the analytic maximum divergence of B. The expression for
# div B = dBx/dx + dBy/dy + dBz/dz is (16*x*r**2/r0**4)*((r/r0)**4 - 1).
#proj = Vector3d(1., 1., 0)
#rs = [xi.dot(proj) for xi in x.internalValues()]
#divBs = [(16*x[i].x*rs[i]**2/r0**4)*((rs[i]/r0)**4 - 1) for i in xrange(len(x.internalValues()))]
#maxDivB0 = max(divBs)
# Plot div B "before."
#plotField(x, divBs, 'div B before div cleaning', 'divB-before.png')
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
simName = 'Dedner-divB-%ix%ix%i'%(n, n, nz)
dataDir = '/p/lscratcha/jnjohnso/' + simName
visitDir = dataDir + "/visit"
restartDir = dataDir + "/restart"
import os, sys
if mpi.rank == 0:
if restoreCycle is None:
import shutil
if os.path.exists(visitDir):
shutil.rmtree(visitDir)
if os.path.exists(restartDir):
shutil.rmtree(restartDir)
if not os.path.exists(visitDir):
os.makedirs(visitDir)
if not os.path.exists(restartDir):
os.makedirs(restartDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase3d()
output("db")
output("db.appendNodeList(nodes)")
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct the artificial viscosities for the problem.
#-------------------------------------------------------------------------------
q = MonaghanGingoldViscosity3d(1.0, 0.75)
#q = PriceMonaghanDissipation(1.0, 1.0, 1.0, 0.75, 1.0)
##-------------------------------------------------------------------------------
## Construct the hydro physics object.
##-------------------------------------------------------------------------------
hydro = Hydro3d(WT, WTPi, q, compatibleEnergy, gradhCorrection)
hydro.cfl = cfl
hydro.HEvolution = HEvolution
hydro.sumForMassDensity = sumForMassDensity
hydro.HsmoothMin = hmin
hydro.HsmoothMax = hmax
#output("hydro")
#output("hydro.cfl")
#output("hydro.HEvolution")
#output("hydro.sumForMassDensity")
#output("hydro.HsmoothMin")
#output("hydro.HsmoothMax")
#output("hydro.kernel()")
#output("hydro.PiKernel()")
#output("hydro.valid()")
#-------------------------------------------------------------------------------
# Construct an MHD object.
#-------------------------------------------------------------------------------
mhd = MHD(WT, mu0)
if divBCleaner == 'none':
mhd.divBCleaner = MHD.BDivergenceCleanerType.noCleaner
elif divBCleaner == 'hyperbolic':
mhd.divBCleaner = MHD.BDivergenceCleanerType.hyperbolicCleaner
elif divBCleaner == 'GreensFn':
mhd.divBCleaner = MHD.BDivergenceCleanerType.GreensFnProjCleaner
elif divBCleaner == 'BiotSavart':
mhd.divBCleaner = MHD.BDivergenceCleanerType.BiotSavartProjCleaner
else:
raise ValueError, "divBCleaner must be 'hyperBolic', 'GreensFn', 'BiotSavart', or 'none'."
#-------------------------------------------------------------------------------
# Create boundary conditions.
#-------------------------------------------------------------------------------
xPlane1 = Plane3d(Vector3d(-0.5, 0.0, 0.0), Vector3d( 1.0, 0.0, 0.0))
xPlane2 = Plane3d(Vector3d( 1.5, 0.0, 0.0), Vector3d(-1.0, 0.0, 0.0))
yPlane1 = Plane3d(Vector3d( 0.0,-0.5, 0.0), Vector3d( 0.0, 1.0, 0.0))
yPlane2 = Plane3d(Vector3d( 0.0, 1.5, 0.0), Vector3d( 0.0,-1.0, 0.0))
zPlane1 = Plane3d(Vector3d( 0.0, 0.0,-0.5*nzx), Vector3d( 0.0, 0.0, 1.0))
zPlane2 = Plane3d(Vector3d( 0.0, 0.0, 1.5*nzx), Vector3d( 0.0, 0.0,-1.0))
xbc = PeriodicBoundary3d(xPlane1, xPlane2)
ybc = PeriodicBoundary3d(yPlane1, yPlane2)
zbc = PeriodicBoundary3d(zPlane1, zPlane2)
hydro.appendBoundary(xbc)
hydro.appendBoundary(ybc)
hydro.appendBoundary(zbc)
mhd.appendBoundary(xbc)
mhd.appendBoundary(ybc)
mhd.appendBoundary(zbc)
#-------------------------------------------------------------------------------
# Construct a time integrator.
#-------------------------------------------------------------------------------
integrator = SynchronousRK2Integrator3d(db)
integrator.appendPhysicsPackage(hydro)
integrator.appendPhysicsPackage(mhd)
integrator.verbose = True
integrator.rigorousBoundaries = True
integrator.lastDt = 1e-3
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.havePhysicsPackage(mhd)")
output("integrator.valid()")
#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
#raw_input()
restartBaseName = '%s/%s'%(restartDir, simName)
control = SpheralController(integrator, WT,
statsStep = statsStep,
initializeMassDensity = True,
restartBaseName = restartBaseName)
output("control")
#print 'max |div B| (0):', maxDivB0
# Restore if desired.
if restoreCycle is not None:
if restoreCycle == -1:
restoreCycle = findLastRestart(simName)
control.loadRestartFile(restoreCycle)
else:
dumpPhysicsState(integrator, simName, visitDir, dumpDerivatives = True)
output("integrator.dtGrowth")
# If we're using a projection scheme to clean div B, advance one step and
# read off our diagnostics.
if mhd.divBCleaner == MHD.BDivergenceCleanerType.GreensFnProjCleaner or \
mhd.divBCleaner == MHD.BDivergenceCleanerType.BiotSavartProjCleaner:
control.advance(control.time() + 1e-10, 1)
maxDivB1 = max(mhd.maxDivB(), abs(mhd.minDivB()))
# Otherwise, go get 'em!
else:
while control.time() < goalTime:
dt = goalTime/10
control.advance(min(goalTime, control.time() + dt), maxSteps)
control.dropRestartFile()
dumpPhysicsState(integrator, simName, visitDir, dumpDerivatives = True)
maxDivB1 = max(mhd.maxDivB(), abs(mhd.minDivB()))
print 'max |div B| (1):', maxDivB1
# Plot the final field configuration (and its divergence).
#plotField(x, B, 'B after div cleaning', 'B-after.png')
#plotField(x, [Bi.x for Bi in B.internalValues()], 'Bx after div cleaning', 'Bx-after.png')
#plotField(x, nodes.magneticDivergence(), 'div B after div cleaning', 'divB-after.png')
```
#### File: functional/Surfaces/Surface.py
```python
from math import *
import mpi
import os, sys, shutil
from Spheral2d import *
from SpheralTestUtilities import *
from findLastRestart import *
import SpheralPointmeshSiloDump
from GenerateNodeDistribution2d import *
title("Surface Detection Test")
class Rejecter(object):
def __init__(self,radius):
self.radius = radius
def __call__(self,x,y,m,H):
nX = []
nY = []
nM = []
nH = []
for i in xrange(len(x)):
ri = sqrt(x[i]*x[i]+y[i]*y[i])
if (ri > self.radius):
nX.append(x[i])
nY.append(y[i])
nM.append(m[i])
nH.append(H[i])
return nX,nY,nM,nH
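# Example (a sketch): Rejecter(0.5) drops every generated point with
# sqrt(x*x + y*y) <= 0.5, carving the central hole (see holeRadius below).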
class dSurface(object):
def __init__(self,nodes,db,Kern,Bf,Sf,hydro,file):
self.nodes = nodes
self.db = db
self.Kern = Kern
self.Bf = Bf
self.Sf = Sf
self.hydro = hydro
self.file = file
def __call__(self,cycle,time,dt):
#self.renormMat()
self.momentNorm()
def renormMat(self):
f = open(self.file, 'w')
f.write("i\tSi\txi\n")
self.db.updateConnectivityMap(True)
cm = self.db.connectivityMap()
for i in xrange(self.nodes.numInternalNodes):
xi = self.nodes.positions()[i]
Hi = self.nodes.Hfield()[i]
neighbors = cm.connectivityForNode(self.nodes, i)
Bi = Tensor.zero
Vi = self.hydro.volume()[0][i]
for j in neighbors[0]:
xj = self.nodes.positions()[j]
xij = xj-xi
Hj = self.nodes.Hfield()[j]
Vj = self.hydro.volume()[0][j] # this could be done better
gWj = Hj*xij.unitVector()*self.Kern.gradValue((Hj*xij).magnitude(),Hj.Determinant())
Bij = gWj.dyad(xij)*Vj
Bi += Bij
Bi = Bi.Inverse()
Ei = Bi.eigenValues()
Si = min(abs(Ei[0]),abs(Ei[1]))
f.write("%d\t%f\t%f\n" % (i,Si,xi.magnitude()))
def momentNorm(self):
f = open(self.file, 'w')
f.write("i\tSi\txi\tSSi\n")
for i in xrange(self.nodes.numInternalNodes):
xi = self.nodes.positions()[i]
m0i = self.hydro.m0()[0][i]
m1i = self.hydro.m1()[0][i]
f.write("%d\t%f\t%f\t%f\n" %(i,m0i,xi.magnitude(),m1i.magnitude()))
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(lattice = True,
nx = 50,
ny = 50,
rmin = 0.0,
rmax = 1.0,
nPerh = 1.01,
rho0 = 1.0,
eps0 = 0.0,
gamma = 5.0/3.0,
mu = 1.0,
rhomin = 1.0e-8,
holeRadius = 0.5,
ASPH = False,
CRKSPH = True,
SPH = True,
XSPH = False,
filter = 0,
KernelConstructor = NBSplineKernel,
order = 7,
# Hydro
Qconstructor = MonaghanGingoldViscosity2d,
correctionOrder = LinearOrder,
Cl = 1.0,
Cq = 2.0,
Qlimiter = False,
balsaraCorrection = False,
epsilon2 = 1e-4,
negligibleSoundSpeed = 1e-5,
csMultiplier = 0.1,
hmin = 0.004,
hmax = 10.0,
hminratio = 0.1,
compatibleEnergy = False,
gradhCorrection = False,
evolveTotalEnergy = False, # assumed default; referenced by the SPH branch below
correctVelocityGradient = False, # assumed default; referenced by the SPH branch below
HEvolution = IdealH,
sumForMassDensity = RigorousSumDensity,
densityUpdate = RigorousSumDensity,
HUpdate = IdealH,
linearInExpansion = False,
volumeType = RKVoronoiVolume,
# Timestep constraints
cfl = 0.5,
deltaPhi = 0.01,
domainIndependent = False,
# Integrator
IntegratorConstructor = CheapSynchronousRK2Integrator,
goalTime = 1.0,
dt = 0.0001,
dtMin = 1.0e-5,
dtMax = 1.0e5,
dtGrowth = 2.0,
maxSteps = None,
steps = None,
statsStep = 10,
redistributeStep = 500,
restartStep = 500,
restoreCycle = None,
smoothIters = 0,
rigorousBoundaries = True,
dtverbose = False,
vizCycle = 1,
vizTime = 1.0e5,
vizMethod = SpheralPointmeshSiloDump.dumpPhysicsState,
clearDirectories = False,
renormFile = "renorm.txt",
detectSurfaces = False,
detectRange = 2.0,
sweepAngle = pi/4.0,
detectThreshold = 0.99,
checkAnswer = False,
)
if CRKSPH:
Qconstructor = LimitedMonaghanGingoldViscosity2d
if ASPH:
HydroConstructor = ACRKSPHHydro
else:
HydroConstructor = CRKSPHHydro
else:
if ASPH:
HydroConstructor = ASPHHydro
else:
HydroConstructor = SPHHydro
dataDir = "surface-%i-%i" % (nx,ny)
dataDir = os.path.join(dataDir, "CRK=%s-nPerh=%f" % (CRKSPH,nPerh))
dataDir = os.path.join(dataDir, "Cl=%f-Cq=%f" % (Cl,Cq))
restartBaseName = "%s/SurfaceTest-%i-%i" % (dataDir,nx,ny)
vizDir = os.path.join(dataDir, "visit")
vizBaseName = "SurfaceTest"
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys
if mpi.rank == 0:
if clearDirectories and os.path.exists(dataDir):
shutil.rmtree(dataDir)
if not os.path.exists(dataDir):
os.makedirs(dataDir)
if not os.path.exists(vizDir):
os.makedirs(vizDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# If we're restarting, find the set of most recent restart files.
#-------------------------------------------------------------------------------
if restoreCycle is None:
restoreCycle = findLastRestart(restartBaseName)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
if KernelConstructor == NBSplineKernel:
Wbase = NBSplineKernel(order)
else:
Wbase = KernelConstructor()
WT = TableKernel(Wbase, 1000)
WTPi = TableKernel(Wbase, 1000)
output('WT')
output('WTPi')
kernelExtent = WT.kernelExtent
nodes1 = makeFluidNodeList("nodes1", eos,
hmin = hmin,
hmax = hmax,
nPerh = nPerh,
kernelExtent = kernelExtent,
rhoMin = rhomin)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
pos = nodes1.positions()
vel = nodes1.velocity()
mass = nodes1.mass()
eps = nodes1.specificThermalEnergy()
H = nodes1.Hfield()
if restoreCycle is None:
if lattice == True:
xmin = (-1.0, -1.0)
xmax = (1.0, 1.0)
myRejecter = Rejecter(holeRadius)
generator = GenerateNodeDistribution2d(nx,ny,rho0,"lattice",
rmin = rmin,
rmax = rmax,
xmin = xmin,
xmax = xmax,
theta = 2*pi,
nNodePerh = nPerh,
SPH = (not ASPH),
rejecter = myRejecter)
if mpi.procs > 1:
from VoronoiDistributeNodes import distributeNodes2d
else:
from DistributeNodes import distributeNodes2d
distributeNodes2d((nodes1,generator))
output("mpi.reduce(nodes1.numInternalNodes, mpi.MIN)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.MAX)")
output("mpi.reduce(nodes1.numInternalNodes, mpi.SUM)")
for nodeID in xrange(nodes1.numInternalNodes):
eps[nodeID] = eps0
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
output("db.appendNodeList(nodes1)")
output("db.numNodeLists")
output("db.numFluidNodeLists")
Bf = db.newFluidTensorFieldList(Tensor.zero, "Normalization")
Sf = db.newFluidScalarFieldList(0.0, "Surface")
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq, linearInExpansion)
q.epsilon2 = epsilon2
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
output("q")
output("q.Cl")
output("q.Cq")
output("q.epsilon2")
output("q.limiter")
output("q.balsaraShearCorrection")
output("q.linearInExpansion")
output("q.quadraticInExpansion")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
if CRKSPH:
hydro = HydroConstructor(W = WT,
WPi = WTPi,
Q = q,
filter = filter,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
XSPH = XSPH,
densityUpdate = densityUpdate,
correctionOrder = correctionOrder,
volumeType = volumeType,
HUpdate = HEvolution,
detectSurfaces = detectSurfaces,
detectThreshold = detectThreshold,
sweepAngle = sweepAngle,
detectRange = detectRange)
else:
hydro = HydroConstructor(W = WT,
Q = q,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
evolveTotalEnergy = evolveTotalEnergy,
gradhCorrection = gradhCorrection,
correctVelocityGradient = correctVelocityGradient,
densityUpdate = densityUpdate,
XSPH = XSPH,
HUpdate = HEvolution)
output("hydro")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.cfl")
output("hydro.compatibleEnergyEvolution")
output("hydro.XSPH")
output("hydro.densityUpdate")
output("hydro.HEvolution")
packages = [hydro]
#-------------------------------------------------------------------------------
# Construct the surface detection periodic work function
#-------------------------------------------------------------------------------
#ds = detectSurface(nodes1,db,WT,Bf,Sf,hydro,renormFile)
#dsFreq = 1
#-------------------------------------------------------------------------------
# Construct a time integrator, and add the one physics package.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
for p in packages:
integrator.appendPhysicsPackage(p)
integrator.lastDt = dt
if dtMin:
integrator.dtMin = dtMin
if dtMax:
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.dtGrowth")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
statsStep = statsStep,
restartStep = restartStep,
restartBaseName = restartBaseName,
restoreCycle = restoreCycle,
vizMethod = vizMethod,
vizBaseName = "surface-test-%ix%i" % (nx, ny),
vizDir = vizDir,
vizStep = vizCycle,
vizTime = vizTime,
SPH = (not ASPH))
output("control")
#control.appendPeriodicWork(ds,dsFreq)
#-------------------------------------------------------------------------------
# Finally run the problem and plot the results.
#-------------------------------------------------------------------------------
if not steps is None:
control.step(steps)
else:
control.advance(goalTime,maxSteps)
if checkAnswer:
sp = hydro.surfacePoint()
count = 0
for i in xrange(nodes1.numInternalNodes):
if sp[0][i] == 1:
count += 1
if count != 212:
raise ValueError("The surface detection algorithm failed!")
else:
print "Surface Detection PASSED."
```
#### File: unit/CRKSPH/testInterpolation.py
```python
from Spheral import *
from SpheralTestUtilities import *
title("Interpolation tests")
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(
# Parameters for seeding nodes.
nx1 = 50,
nx2 = 50,
rho1 = 1.0,
rho2 = 1.0,
eps1 = 0.0,
eps2 = 0.0,
x0 = 0.0,
x1 = 0.5,
x2 = 1.0,
nPerh = 4.01,
hmin = 0.0001,
hmax = 1000.0,
# What order of reproducing kernel should we use (0,1,2)?
correctionOrder = LinearOrder,
# Should we randomly perturb the positions?
ranfrac = 0.2,
seed = 14892042,
# What test problem are we doing?
testDim = "1d",
testCase = "linear",
# Should we compare with SPH?
testSPH = True,
# The fields we're going to interpolate.
# Linear coefficients: y = y0 + m0*x
y0 = 1.0,
m0 = 1.0,
# Quadratic coefficients: y = y2 + m2*x^2
y2 = 1.0,
m2 = 0.5,
gamma = 5.0/3.0,
mu = 1.0,
# Parameters for iterating H.
iterateH = True,
maxHIterations = 200,
Htolerance = 1.0e-4,
# Parameters for passing the test
interpolationTolerance = 5.0e-7,
derivativeTolerance = 5.0e-5,
graphics = True,
plotKernels = False,
outputFile = "None",
)
assert testCase in ("linear", "quadratic", "step")
assert testDim in ("1d", "2d", "3d")
FacetedVolume = {"1d" : Box1d,
"2d" : Polygon,
"3d" : Polyhedron}[testDim]
#-------------------------------------------------------------------------------
# Appropriately set generic object names based on the test dimensionality.
#-------------------------------------------------------------------------------
exec("from Spheral%s import *" % testDim)
## import Spheral
## for name in [x for x in Spheral.__dict__ if testDim in x]:
## exec("%s = Spheral.__dict__['%s']" % (name.replace(testDim, ""), name))
#-------------------------------------------------------------------------------
# Create a random number generator.
#-------------------------------------------------------------------------------
import random
rangen = random.Random()
rangen.seed(seed)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel(WendlandC4Kernel(), 1000)
output("WT")
kernelExtent = WT.kernelExtent
#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
nodes1 = makeFluidNodeList("nodes1", eos,
hmin = hmin,
hmax = hmax,
nPerh = nPerh)
nodes2 = makeFluidNodeList("nodes2", eos,
hmin = hmin,
hmax = hmax,
nPerh = nPerh)
nodeSet = [nodes1, nodes2]
for nodes in nodeSet:
output("nodes")
output("nodes.hmin")
output("nodes.hmax")
output("nodes.nodesPerSmoothingScale")
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
if testDim == "1d":
from DistributeNodes import distributeNodesInRange1d
distributeNodesInRange1d([(nodes1, [(nx1, rho1, (x0, x1))])], nPerh = nPerh)
distributeNodesInRange1d([(nodes2, [(nx2, rho2, (x1, x2))])], nPerh = nPerh)
elif testDim == "2d":
from DistributeNodes import distributeNodes2d
from GenerateNodeDistribution2d import GenerateNodeDistribution2d
gen1 = GenerateNodeDistribution2d(nx1, nx1 + nx2, rho1,
distributionType = "lattice",
xmin = (x0, x0),
xmax = (x1, x2),
nNodePerh = nPerh,
SPH = True)
gen2 = GenerateNodeDistribution2d(nx2, nx1 + nx2, rho2,
distributionType = "lattice",
xmin = (x1, x0),
xmax = (x2, x2),
nNodePerh = nPerh,
SPH = True)
distributeNodes2d((nodes1, gen1),
(nodes2, gen2))
elif testDim == "3d":
from DistributeNodes import distributeNodes3d
from GenerateNodeDistribution3d import GenerateNodeDistribution3d
gen1 = GenerateNodeDistribution3d(nx1, nx1 + nx2, nx1 + nx2, rho1,
distributionType = "lattice",
xmin = (x0, x0, x0),
xmax = (x1, x2, x2),
nNodePerh = nPerh,
SPH = True)
gen2 = GenerateNodeDistribution3d(nx2, nx1 + nx2, nx1 + nx2, rho2,
distributionType = "lattice",
xmin = (x1, x0, x0),
xmax = (x2, x2, x2),
nNodePerh = nPerh,
SPH = True)
distributeNodes3d((nodes1, gen1),
(nodes2, gen2))
else:
raise ValueError, "Only tests cases for 1d,2d and 3d."
for nodes in nodeSet:
output("nodes.name, nodes.numNodes")
# Set node properties.
for nodes, eps0 in ((nodes1, eps1),
(nodes2, eps2)):
eps = nodes.specificThermalEnergy()
for i in xrange(nodes.numInternalNodes):
eps[i] = eps0
#-------------------------------------------------------------------------------
# Optionally randomly jitter the node positions.
#-------------------------------------------------------------------------------
dx1 = (x1 - x0)/nx1
dx2 = (x2 - x1)/nx2
dy = (x2 - x0)/(nx1 + nx2)
dz = (x2 - x0)/(nx1 + nx2)
for nodes, dx in ((nodes1, dx1),
(nodes2, dx2)):
pos = nodes.positions()
for i in xrange(nodes.numInternalNodes):
if testDim == "1d":
pos[i].x += ranfrac * dx * rangen.uniform(-1.0, 1.0)
elif testDim == "2d":
pos[i].x += ranfrac * dx * rangen.uniform(-1.0, 1.0)
pos[i].y += ranfrac * dy * rangen.uniform(-1.0, 1.0)
elif testDim == "3d":
pos[i].x += ranfrac * dx * rangen.uniform(-1.0, 1.0)
pos[i].y += ranfrac * dy * rangen.uniform(-1.0, 1.0)
pos[i].z += ranfrac * dz * rangen.uniform(-1.0, 1.0)
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
for nodes in nodeSet:
db.appendNodeList(nodes)
output("db")
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Iterate the h to convergence if requested.
#-------------------------------------------------------------------------------
if iterateH:
bounds = vector_of_Boundary()
method = SPHSmoothingScale()
iterateIdealH(db,
bounds,
WT,
method,
maxHIterations,
Htolerance)
#-------------------------------------------------------------------------------
# Initialize our field.
#-------------------------------------------------------------------------------
f = db.newFluidScalarFieldList(name="test field")
pos = db.fluidPosition
for iNodeList, nodes in enumerate(db.nodeLists()):
for i in xrange(nodes.numInternalNodes):
x = pos(iNodeList, i).x
if testCase == "linear":
f[iNodeList][i] = y0 + m0*x
elif testCase == "quadratic":
f[iNodeList][i] = y2 + m2*x*x
elif testCase == "step":
if x < x1:
f[iNodeList][i] = y0
else:
f[iNodeList][i] = 2*y0
#-------------------------------------------------------------------------------
# Prepare variables to accumulate the test values.
#-------------------------------------------------------------------------------
fSPH = db.newFluidScalarFieldList(name="SPH interpolated values")
dfSPH = db.newFluidVectorFieldList(name="SPH derivative values")
A = db.newFluidScalarFieldList(name="A")
B = db.newFluidVectorFieldList(name="B")
C = db.newFluidTensorFieldList(name="C")
gradA = db.newFluidVectorFieldList(name="gradA")
gradB = db.newFluidTensorFieldList(name="gradB")
gradC = db.newFluidThirdRankTensorFieldList(name="gradC")
M0 = db.newFluidScalarFieldList(name="M0")
M1 = db.newFluidVectorFieldList(name="M1")
M2 = db.newFluidSymTensorFieldList(name="M2")
M3 = db.newFluidThirdRankTensorFieldList(name="M3")
M4 = db.newFluidFourthRankTensorFieldList(name="M4")
gradM0 = db.newFluidVectorFieldList(name="grad M0")
gradM1 = db.newFluidTensorFieldList(name="grad M1")
gradM2 = db.newFluidThirdRankTensorFieldList(name="grad M2")
gradM3 = db.newFluidFourthRankTensorFieldList(name="grad M3")
gradM4 = db.newFluidFifthRankTensorFieldList(name="grad M4")
surfacePoint = db.newFluidIntFieldList(name="surface point")
db.updateConnectivityMap(True)
cm = db.connectivityMap()
position = db.fluidPosition
weight = db.fluidMass
weight /= db.fluidMassDensity
H = db.fluidHfield
# Compute the volumes to use as weighting.
#polyvol = db.newFluidFacetedVolumeFieldList(name=FacetedVolume(), "polyvols")
#weight = db.newFluidScalarFieldList(name=1.0, "volume")
#computeHullVolumes(cm, position, polyvol, weight)
computeCRKSPHMoments(cm, WT, weight, position, H, correctionOrder, NodeCoupling(),
M0, M1, M2, M3, M4, gradM0, gradM1, gradM2, gradM3, gradM4)
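# The M0..M4 fields just filled are the zeroth- through fourth-rank kernel
# moments (roughly, neighbor sums of V_j * x_ij^k * W_ij); the corrections
# A, B, C computed next are assembled from them.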
computeCRKSPHCorrections(M0, M1, M2, M3, M4, gradM0, gradM1, gradM2, gradM3, gradM4, H,
surfacePoint,
correctionOrder,
A, B, C, gradA, gradB, gradC)
#-------------------------------------------------------------------------------
# Measure the interpolated values and gradients.
#-------------------------------------------------------------------------------
if testSPH:
for iNodeList, nodes in enumerate(db.nodeLists()):
for i in xrange(nodes.numInternalNodes):
ri = position(iNodeList, i)
Hi = H(iNodeList, i)
Hdeti = Hi.Determinant()
wi = weight(iNodeList, i)
fi = f(iNodeList, i)
# Self contribution.
W0 = WT.kernelValue(0.0, Hdeti)
fSPH[iNodeList][i] = wi*W0 * fi
# Go over the neighbors.
allneighbors = cm.connectivityForNode(iNodeList, i)
for jNodeList, neighbors in enumerate(allneighbors):
for j in neighbors:
rj = position(jNodeList, j)
Hj = H(jNodeList, j)
Hdetj = Hj.Determinant()
wj = weight(jNodeList, j)
fj = f(jNodeList, j)
# The standard SPH kernel and its gradient.
rij = ri - rj
etai = Hi*rij
etaj = Hj*rij
Wj = WT.kernelValue(etaj.magnitude(), Hdetj)
gradWj = Hj*etaj.unitVector() * WT.gradValue(etaj.magnitude(), Hdetj)
# Increment our interpolated values.
fSPH[iNodeList][i] += fj * wj*Wj
# Increment the derivatives.
dfSPH[iNodeList][i] += fj * wj*gradWj
#-------------------------------------------------------------------------------
# Check the C++ interpolation and gradient methods.
#-------------------------------------------------------------------------------
fRK = interpolateCRKSPH(f, position, weight, H, A, B, C,
cm, correctionOrder, WT)
dfRK = gradientCRKSPH(f, position, weight, H,
A, B, C, gradA, gradB, gradC,
cm, correctionOrder, WT)
#-------------------------------------------------------------------------------
# Prepare the answer to check against.
#-------------------------------------------------------------------------------
yans = db.newFluidScalarFieldList(name="interpolation answer")
dyans = db.newFluidScalarFieldList(name="derivative answer")
for iNodeList in xrange(db.numNodeLists):
n = yans[iNodeList].numInternalElements
for i in xrange(n):
xi = position(iNodeList, i).x
if testCase == "linear":
yans[iNodeList][i] = y0 + m0*xi
dyans[iNodeList][i] = m0
elif testCase == "quadratic":
yans[iNodeList][i] = y2 + m2*xi*xi
dyans[iNodeList][i] = 2*m2*xi
elif testCase == "step":
if iNodeList == 0:
yans[iNodeList][i] = y0
else:
yans[iNodeList][i] = 2*y0
dyans[iNodeList][i] = 0.0
#-------------------------------------------------------------------------------
# Check the accuracy of our answers.
#-------------------------------------------------------------------------------
def flattenFieldList(fl):
result = []
for f in fl:
result += list(f.internalValues())
return result
errySPH = flattenFieldList(fSPH - yans)
erryRK = flattenFieldList(fRK - yans)
errdySPH = []
errdyRK = []
for iNodeList in xrange(db.numNodeLists):
n = fSPH[iNodeList].numInternalElements
for i in xrange(n):
errdySPH.append(dfSPH(iNodeList, i).x - dyans(iNodeList, i))
errdyRK.append(dfRK(iNodeList, i).x - dyans(iNodeList, i))
maxySPHerror = max([abs(x) for x in errySPH])
maxdySPHerror = max([abs(x) for x in errdySPH])
maxyRKerror = max([abs(x) for x in erryRK])
maxdyRKerror = max([abs(x) for x in errdyRK])
print "Maximum errors (interpolation): SPH = %g, RK = %g" % (maxySPHerror, maxyRKerror)
print "Maximum errors (derivatives): SPH = %g, RK = %g" % (maxdySPHerror, maxdyRKerror)
# Output timing tables.
Timer.TimerSummary()
#-------------------------------------------------------------------------------
# Plot the things.
#-------------------------------------------------------------------------------
if graphics:
from SpheralMatplotlib import *
xans = [x.x for x in flattenFieldList(position)]
# Interpolated values.
p1 = plotFieldList(fRK,
plotStyle = "g*",
lineTitle = "RK",
winTitle = "Interpolated values")
if testSPH:
plotFieldList(fSPH,
plotStyle = "r+",
lineTitle = "SPH",
plot = p1)
plotFieldList(yans,
plotStyle = "k-",
lineTitle = "Answer",
plot = p1)
# Interpolation error
p2 = newFigure()
p2.plot(xans, erryRK, "g*",
label = "RK")
if testSPH:
p2.plot(xans, errySPH, "r+",
label = "SPH")
p2.axes.legend()
plt.title("Error in interpolation")
# Derivative values.
p3 = plotFieldList(dfRK,
yFunction = "%s.x",
plotStyle = "g*",
lineTitle = "RK",
winTitle = "Derivative values")
if testSPH:
plotFieldList(dfSPH,
yFunction = "%s.x",
plotStyle = "r+",
lineTitle = "SPH",
plot = p3)
plotFieldList(dyans,
plotStyle = "k-",
lineTitle = "Answer",
plot = p3)
# Derivative error
p4 = newFigure()
p4.plot(xans, errdyRK, "g*",
label = "RK")
if testSPH:
p4.plot(xans, errdySPH, "r+",
label = "SPH")
p4.axes.legend()
plt.title("Error in derivatives")
# Plot the kernel shapes as appropriate.
if testDim == "1d":
p7 = newFigure()
j = -2 # int(nodes1.numInternalNodes/2)
Hj = H[1][j]
hj = 1.0/Hj.xx
Hdetj = Hj.Determinant()
Aj = A[1][j]
Bj = B[1][j].x
Cj = C[1][j].xx
nsamp = 100
dx = 4.0/nsamp
xvals = [i*dx - 2.0 for i in xrange(nsamp)]
W = [WT.kernelValue(abs(xi), Hdetj) for xi in xvals]
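# Corrected kernel on the same sample points, a sketch of the quadratic form:
#   WR(eta) = Aj*(1 + Bj*(eta*hj) + Cj*(eta*hj)**2)*W(eta),
# i.e. the correction uses physical distances eta*h.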
WR = [Wi*Aj*(1.0 + Bj*xi*hj + Cj*xi*xi*hj*hj) for xi, Wi in zip(xvals, W)]
p7.plot(xvals, W, "r-", label="SPH")
p7.plot(xvals, WR, "g-", label="RK")
p7.axes.legend()
plt.title("Kernel")
if outputFile != "None":
f = open("Kernel_" + outputFile, "w")
f.write(("#" + 3*' "%20s"' + "\n") % ("eta", "Wj", "WRj"))
for xi, Wi, WRi in zip(xvals, W, WR):
f.write((3*" %20g" + "\n") % (xi, Wi, WRi))
f.close()
# We may want a gnu/pdv style text file.
if outputFile != "None" and testDim == "2d":
of = open(outputFile, "w")
of.write(('#' + 7*' "%20s"' + '\n') % ("x", "interp answer", "grad answer", "interp SPH", "interp CRK", "grad SPH", "grad CRK"))
for iNodeList, nodes in enumerate(db.nodeLists()):
for i in xrange(nodes.numInternalNodes):
of.write((7*" %20g" + "\n") %
(position(iNodeList,i).x, yans(iNodeList,i), dyans(iNodeList,i), fSPH(iNodeList,i), fRK(iNodeList,i), dfSPH(iNodeList,i).x, dfRK(iNodeList,i).x))
of.close()
# If we're in 2D/3D, dump a silo file too.
if testDim != "1d":
from siloPointmeshDump import siloPointmeshDump
siloPointmeshDump("testInterpolation_%s_%s" % (testCase, testDim),
fieldLists = [fSPH, fRK, dfSPH, dfRK,
yans, dyans,
weight, H, A, B, gradA, gradB,
M0, M1, M2])
if plotKernels:
import Gnuplot
pk = generateNewGnuPlot()
for iNodeList, nodes in enumerate(db.nodeLists()):
for i in xrange(nodes.numInternalNodes):
xi = position(iNodeList,i).x
Hi = H(iNodeList,i)
Hdeti = Hi.Determinant()
hi = 1.0/Hi.xx
Ai = A(iNodeList,i)
Bi = B(iNodeList,i)
Ci = C(iNodeList,i)
dx = 2.0*kernelExtent*hi/50
x = [xi - kernelExtent*hi + (i + 0.5)*dx for i in xrange(50)]
#y = [Ai*(1.0 + Bi.x*(xi - xj))*WT.kernelValue(abs(xi - xj)/hi, Hdeti) for xj in x]
y = [Ai*(1.0 + Bi.x*(xi - xj)+Ci.xx*(xi-xj)*(xi-xj))*WT.kernelValue(abs(xi - xj)/hi, Hdeti) for xj in x]
d = Gnuplot.Data(x, y, with_="lines", inline=True)
pk.replot(d)
#-------------------------------------------------------------------------------
# Check the maximum RK error and fail the test if it's out of bounds.
#-------------------------------------------------------------------------------
if maxyRKerror > interpolationTolerance:
raise ValueError("RK interpolation error out of bounds: %g > %g" % (maxyRKerror, interpolationTolerance))
if maxdyRKerror > derivativeTolerance:
raise ValueError("RK derivative error out of bounds: %g > %g" % (maxdyRKerror, derivativeTolerance))
```
#### File: unit/Distributed/TestSortAndDivideDistribute.py
```python
from math import *
import unittest
from Spheral import *
from testDistributeByPosition1d import TestDistributeByPosition1d
from testDistributeByPosition2d import TestDistributeByPosition2d
from testParmetisDistribute import TestParmetisRedistribute2d, TestParmetisRedistribute3d
#===============================================================================
# Load mpi, and figure out how many domains to set up, and which domain we are.
#===============================================================================
import mpi
domainID = mpi.rank
nDomains = mpi.procs
#===============================================================================
# Main testing class for the 1-D tests.
#===============================================================================
class TestSortAndDivideRedistribute1d(TestDistributeByPosition1d):
# The actual test itself!
def testIt(self):
print "Testing SortAndDivideRedistribute1d on domain %i of %i domains" % \
(domainID, nDomains)
# Record how many nodes we're starting with.
nNodesGlobal = []
for nodeList in [self.dataBase.nodeLists()[i] for i in xrange(self.dataBase.numNodeLists)]:
nNodesGlobal.append(mpi.allreduce(nodeList.numInternalNodes, mpi.SUM))
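# mpi.allreduce with mpi.SUM returns the global sum to every rank, so each
# process records identical pre-redistribution node counts.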
print "Total num nodes: ", nNodesGlobal, sum(nNodesGlobal)
# Go ahead and redistribute those nodes!
repartition = SortAndDivideRedistributeNodes1d(2.0)
repartition.redistributeNodes(self.dataBase)
# Make sure that the numbers of nodes are correct.
assert self.dataBase.numNodeLists == len(nNodesGlobal)
for nodeList, nGlobal in zip([self.dataBase.nodeLists()[i] for i in xrange(self.dataBase.numNodeLists)],
nNodesGlobal):
n = mpi.allreduce(nodeList.numInternalNodes, mpi.SUM)
if n != nGlobal:
self.fail("Wrong number of nodes: %i != %i" % (n, nGlobal))
## # Do a consistency check of the new distribution.
## nodeDistribution = repartition.currentDomainDecomposition(self.dataBase)
## if (not repartition.validDomainDecomposition(nodeDistribution,
## self.dataBase)):
## self.fail("Invalid domain decomposition.")
# Have each domain figure out its min and max x. This is
# just a report, since it's not necessarily true that each domain
# is exclusive in x range.
localxmin = 1e10
localxmax = -1e10
for nodeList in [self.dataBase.nodeLists()[i] for i in xrange(self.dataBase.numNodeLists)]:
if nodeList.numInternalNodes > 0:
localxmin = min(localxmin, min([r.x for r in nodeList.positions().internalValues()] + [1e60]))
localxmax = max(localxmax, max([r.x for r in nodeList.positions().internalValues()] + [-1e60]))
import sys
sys.stderr.write("Process %i in x range (%f, %f)\n" % (domainID, localxmin, localxmax))
# Build a diagnostic plot to help show how the domains are distributed.
domain = ScalarFieldList1d()
domain.copyFields()
for nodes in self.dataBase.nodeLists():
f = ScalarField1d(nodes.name(), nodes, mpi.rank)
domain.appendField(f)
self.p = plotFieldList(domain,
plotStyle = "points",
colorNodeLists = False,
colorDomains = True)
#===============================================================================
# Main testing class for the 2-D tests.
#===============================================================================
class TestSortAndDivideRedistribute2d(TestParmetisRedistribute2d):
# The actual test itself!
# Create a SortAndDivideRedistributeNodes object, have it redistribute the
# nodes.
def testIt(self):
print "Testing SortAndDivideRedistributeNodes2d on domain %i of %i domains" % \
(domainID, nDomains)
# Record how many nodes we're starting with.
nNodesGlobal = []
for nodeList in self.dataBase.nodeLists():
nNodesGlobal.append(mpi.allreduce(nodeList.numInternalNodes,
mpi.SUM))
# Go ahead and redistribute those nodes!
repartition = SortAndDivideRedistributeNodes2d(2.0)
repartition.redistributeNodes(self.dataBase)
# Make sure that the numbers of nodes are correct.
assert self.dataBase.numNodeLists == len(nNodesGlobal)
i = 0
for nodeList in self.dataBase.nodeLists():
n = mpi.allreduce(nodeList.numInternalNodes, mpi.SUM)
nGlobal = nNodesGlobal[i]
if n != nGlobal:
self.fail("Wrong number of nodes: %i != %i" % (n, nGlobal))
i += 1
## # Do a consistency check of the new distribution.
## nodeDistribution = repartition.currentDomainDecomposition(self.dataBase)
## if (not repartition.validDomainDecomposition(nodeDistribution,
## self.dataBase)):
## self.fail("Invalid domain decomposition.")
self.p = plotNodePositions2d(self.dataBase,
colorNodeLists = 0,
colorDomains = 1)
#===============================================================================
# Main testing class for the 3-D tests.
#===============================================================================
class TestSortAndDivideRedistribute3d(TestParmetisRedistribute3d):
# The actual test itself!
# Create a SortAndDivideRedistributeNodes object, have it redistribute the
# nodes.
def testIt(self):
print "Testing SortAndDivideRedistributeNodes3d on domain %i of %i domains" % \
(domainID, nDomains)
# Record how many nodes we're starting with.
nNodesGlobal = []
for nodeList in self.dataBase.nodeLists():
nNodesGlobal.append(mpi.allreduce(nodeList.numInternalNodes,
mpi.SUM))
# Go ahead and redistribute those nodes!
repartition = SortAndDivideRedistributeNodes3d(2.0)
repartition.redistributeNodes(self.dataBase)
# Make sure that the numbers of nodes are correct.
assert self.dataBase.numNodeLists == len(nNodesGlobal)
i = 0
for nodeList in self.dataBase.nodeLists():
n = mpi.allreduce(nodeList.numInternalNodes, mpi.SUM)
nGlobal = nNodesGlobal[i]
if n != nGlobal:
self.fail("Wrong number of nodes: %i != %i" % (n, nGlobal))
i += 1
## # Do a consistency check of the new distribution.
## nodeDistribution = repartition.currentDomainDecomposition(self.dataBase)
## if (not repartition.validDomainDecomposition(nodeDistribution,
## self.dataBase)):
## self.fail("Invalid domain decomposition.")
```
#### File: unit/FieldOperations/TestGradDivVectorFieldList.py
```python
from Spheral import *
from SpheralTestUtilities import *
from SpheralGnuPlotUtilities import *
################################################################################
n1, n2 = 50, 50
rho1, rho2 = 1.0, 1.0
m1, m2 = 0.5*rho1/n1, 0.5*rho2/n2
eps0 = 0.0
v0 = 0.0
vMultiplier = 1.0
x0, x1 = 0.0, 1.0
gamma = 2.0
mu = 1.0
neighborSearchType = 3 # GatherScatter
numGridLevels = 10
topGridCellSize = 2.0
origin = Vector1d(0.0)
################################################################################
def vxFunction(x):
# return v0 + vMultiplier*x**3
# return v0 + vMultiplier*x**4
return v0 + vMultiplier*x**5
################################################################################
title('1-D FieldList Test')
eos = GammaLawGasMKS1d(gamma, mu)
nodes1 = SphNodeList1d(eos, n1)
nodes2 = SphNodeList1d(eos, n2)
output('nodes1.numNodes')
output('nodes2.numNodes')
#W = NBSplineKernel1d(5)
W = BSplineKernel1d()
#W = W4SplineKernel1d()
#W = GaussianKernel1d()
#W = SuperGaussianKernel1d()
#W = PiGaussianKernel1d(1.0)
#W = NSincPolynomialKernel1d(5)
#W = QuarticSplineKernel1d()
output('W')
kernelExtent = W.kernelExtent
# Set the table kernel for the FieldList divergence.
WT = TableKernel1d()
WT.setTableData(W, 1000)
output('WT')
import random
generator = random.Random()
ranMag = 0.1
dx1 = 0.5*(x1 - x0)/n1
dx2 = 0.5*(x1 - x0)/n2
for i in xrange(n1):
nodes1.positions[i] = (i + 0.5)*dx1
#nodes1.positions[i] = (i + 0.5 + ranMag*generator.uniform(-1.0,1.0))*dx1
for i in xrange(n2):
nodes2.positions[i] = 0.5 + (i + 0.5)*dx2
#nodes2.positions[i] = 0.5 + (i + 0.5 + ranMag*generator.uniform(-1.0,1.0))*dx2
output('nodes1.positions[:]')
output('nodes2.positions[:]')
nodes1.mass[:] = [m1]*nodes1.numNodes
nodes2.mass[:] = [m2]*nodes2.numNodes
for nodeID in xrange(nodes1.numNodes):
nodes1.velocity[nodeID].x = vxFunction(nodes1.positions[nodeID].x)
for nodeID in xrange(nodes2.numNodes):
nodes2.velocity[nodeID].x = vxFunction(nodes2.positions[nodeID].x)
h1 = 1.0/(2.01*dx1)
h2 = 1.0/(2.01*dx2)
for H in nodes1.Hfield:
H.xx = h1
for H in nodes2.Hfield:
H.xx = h2
output('nodes1.Hfield[:]')
output('nodes2.Hfield[:]')
neighbor1 = NestedGridNeighbor1d(nodes1,
neighborSearchType,
numGridLevels,
topGridCellSize,
origin,
kernelExtent)
nodes1.neighbor = neighbor1
neighbor2 = NestedGridNeighbor1d(nodes2,
neighborSearchType,
numGridLevels,
topGridCellSize,
origin,
kernelExtent)
nodes2.neighbor = neighbor2
nodes1.massDensity[:] = [rho1]*nodes1.numNodes
nodes2.massDensity[:] = [rho2]*nodes2.numNodes
output('nodes1.massDensity[:]')
output('nodes2.massDensity[:]')
#output('nodes.updateMassDensity(W)')
output('nodes1.updateWeight()')
output('nodes2.updateWeight()')
db = DataBase1d()
output('db')
output('db.appendNodeList(nodes1)')
output('db.appendNodeList(nodes2)')
output('db.numNodeLists')
output('db.numFluidNodeLists')
output('db.globalMass[:]')
output('db.fluidMass[:]')
output('db.globalPosition[:]')
output('db.fluidMassDensity[:]')
output('db.fluidSpecificThermalEnergy[:]')
output('db.fluidVelocity[:]')
output('db.fluidWeight[:]')
output('db.fluidHfield[:]')
velocity = db.fluidVelocity
##output('velocity[:]')
##output('velocity[0][:]')
##output('velocity[1][:]')
fluidPosition = db.fluidPosition
fluidWeight = db.fluidWeight
fluidMass = db.fluidMass
fluidRho = db.fluidMassDensity
fluidHfield = db.fluidHfield
# Create boundary conditions. We need at least this much to create the initial
# mass density field.
xPlane0 = Plane1d((x0), ( 1.0))
xPlane1 = Plane1d((x1), (-1.0))
xbc0 = ReflectingBoundary1d(xPlane0)
xbc1 = ReflectingBoundary1d(xPlane1)
boundaryConditions = [xbc0, xbc1]
# Enforce the boundary conditions.
for i in xrange(10):
for bc in boundaryConditions:
bc.setGhostNodes(db)
bc.applyFieldListGhostBoundary(fluidWeight)
bc.applyFieldListGhostBoundary(fluidMass)
bc.applyFieldListGhostBoundary(fluidRho)
bc.applyFieldListGhostBoundary(fluidHfield)
db.updateFluidMassDensity()
for nodes in [nodes1, nodes2]:
nodes.numGhostNodes = 0
nodes.neighbor.updateNodes()
nodes.updateWeight()
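# The ghost-node setup is iterated so the ghost values, mass density, and
# weights can settle to mutually consistent values (each pass presumably
# reuses the ghosts generated by the previous one).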
for bc in boundaryConditions:
bc.setGhostNodes(db)
bc.applyFieldListGhostBoundary(fluidWeight)
bc.applyFieldListGhostBoundary(fluidMass)
bc.applyFieldListGhostBoundary(fluidRho)
bc.applyFieldListGhostBoundary(fluidHfield)
for nodes in [nodes1, nodes2]:
nodes.neighbor.updateNodes()
for i in xrange(nodes.firstGhostNode, nodes.numNodes):
nodes.velocity[i].x = vxFunction(nodes.positions[i].x)
################################################################################
# Generate the analytic answer, grad(div v) = d2v/dx2, for the velocity field above.
import Gnuplot
from numpy import array # assumed source of "array", which was never imported above
xans = array([0.0]*(n1 + n2))
i = 0
for nodeList in db.nodeLists():
for r in nodeList.positions[:nodeList.numInternalNodes]:
xans[i] = r.x
i = i + 1
#yans = 6.0*vMultiplier*xans
#yans = 12.0*vMultiplier*xans**2
yans = 20.0*vMultiplier*xans**3
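# Worked check for n = 5: d2/dx2 [v0 + c*x**5] = 20*c*x**3, matching yans above;
# the commented alternatives give 6*c*x and 12*c*x**2 for the x**3 and x**4 fields.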
ansData = Gnuplot.Data(xans, yans, with_='lines', title='Analytic answer')
################################################################################
# Plot the direct, successive first derivatives against the known solution.
dummy = FieldFunctions()
# directGradDivVel = dummy.gradDivVectorFieldList1d(velocity,
# fluidPosition,
# fluidWeight,
# fluidMass,
# fluidRho,
# fluidHfield,
# WT,
# boundaryConditions)
# plotDirect = plotFieldList(directGradDivVel, yFunction='%s.x',
# plotStyle='points',
# winTitle = 'Direct second derivative of velocity.')
# plotDirect.replot(ansData)
# ################################################################################
# # Plot the simple second derivative method against the known solution.
# simpleGradDivVel = dummy.gradDivVectorFieldListSimple1d(velocity,
# fluidPosition,
# fluidWeight,
# fluidMass,
# fluidRho,
# fluidHfield,
# WT)
# plotSimple = plotFieldList(simpleGradDivVel, yFunction='%s.x',
# plotStyle='points',
# winTitle = 'Simple second derivative of velocity.')
# plotSimple.replot(ansData)
################################################################################
# Plot the golden second derivative method against the known solution.
goldenGradDivVel = dummy.gradDivVectorFieldListGolden1d(velocity,
fluidPosition,
fluidWeight,
fluidMass,
fluidRho,
fluidHfield,
WT)
plotGolden = plotFieldList(goldenGradDivVel, yFunction='%s.x',
plotStyle='points',
winTitle = 'Golden second derivative of velocity.')
plotGolden.replot(ansData)
# ################################################################################
# # Plot the golden2 second derivative method against the known solution.
# golden2GradDivVel = dummy.gradDivVectorFieldListGolden21d(velocity,
# fluidPosition,
# fluidWeight,
# fluidMass,
# fluidRho,
# fluidHfield,
# WT)
# plotGolden2 = plotFieldList(golden2GradDivVel, yFunction='%s.x',
# plotStyle='points',
# winTitle = 'Golden2 second derivative of velocity.')
# plotGolden2.replot(ansData)
# ################################################################################
# # Plot the mash second derivative method against the known solution.
# mashGradDivVel = dummy.gradDivVectorFieldListMash1d(velocity,
# fluidPosition,
# fluidWeight,
# fluidMass,
# fluidRho,
# fluidHfield,
# WT)
# plotMash = plotFieldList(mashGradDivVel, yFunction='%s.x',
# plotStyle='points',
# winTitle = 'Mash second derivative of velocity.')
# plotMash.replot(ansData)
################################################################################
# Plot the pair wise direct second derivative method against the known solution.
pwGradDivVel = dummy.gradDivVectorFieldListPairWise1d(velocity,
fluidPosition,
fluidWeight,
fluidMass,
fluidRho,
fluidHfield,
WT)
plotPW = plotFieldList(pwGradDivVel, yFunction='%s.x',
plotStyle='points',
winTitle = 'Pair wise direct second derivative of velocity.')
plotPW.replot(ansData)
plotPW.refresh()
```
#### File: unit/FieldOperations/testSampleMultipleFields2Lattice1d.py
```python
import unittest
from Spheral import *
from SpheralTestUtilities import fuzzyEqual
import mpi
from testSampleMultipleFields2Lattice import TestSampleMultipleFields2Lattice
#===============================================================================
# 1-D tests.
#===============================================================================
class TestSampleMultipleFields2Lattice1d(TestSampleMultipleFields2Lattice,
unittest.TestCase):
#---------------------------------------------------------------------------
# Initialize the problem.
#---------------------------------------------------------------------------
def setUp(self):
from Spheral1d import (vector_of_int, Vector, Tensor, GammaLawGasMKS,
TableKernel, BSplineKernel, makeFluidNodeList,
ScalarField, VectorField, DataBase, Plane,
PeriodicBoundary)
self.ndim = 1
self.xmin = Vector(0.0)
self.xmax = Vector(1.0)
self.nsample = vector_of_int()
self.nsample.append(100)
# Tolerances for the test
self.scalarTol = 1.0e-5
self.vectorTol = 1.0e-3
self.tensorTol = 1.0e-4
n = 100
self.rho0 = 10.0
self.v0 = Vector(1.0)
self.eps0 = -1.0
self.gradv0 = Tensor(8.0)
x0, x1 = 0.0, 1.0
# Create the nodes and such.
self.eos = GammaLawGasMKS(5.0/3.0, 1.0)
self.WT = TableKernel(BSplineKernel())
self.nodes = makeFluidNodeList("nodes", self.eos)
# Distribute the nodes.
from DistributeNodes import distributeNodesInRange1d
distributeNodesInRange1d([(self.nodes, n, self.rho0, (x0, x1))])
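# distributeNodesInRange1d presumably lays the n nodes out uniformly in (x0, x1)
# at mass density rho0, dividing them across the participating MPI ranks.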
# Set the velocities and energies.
self.nodes.velocity(VectorField("tmp", self.nodes, self.v0))
self.nodes.specificThermalEnergy(ScalarField("tmp", self.nodes, self.eps0))
self.db = DataBase()
self.db.appendNodeList(self.nodes)
# Create the boundary conditions.
p0 = Plane(Vector(0.0), Vector(1.0))
p1 = Plane(Vector(1.0), Vector(-1.0))
xbc = PeriodicBoundary(p0, p1)
self.bcs = [xbc]
try:
self.bcs.append(TreeDistributedBoundary1d.instance())
except Exception:
if mpi.procs > 1:
raise RuntimeError("Unable to get parallel boundary condition")
else:
pass
# Enforce boundaries.
db = DataBase()
db.appendNodeList(self.nodes)
for bc in self.bcs:
bc.setAllGhostNodes(db)
bc.finalizeGhostBoundary()
self.nodes.neighbor().updateNodes()
for bc in self.bcs:
bc.applyGhostBoundary(self.nodes.mass())
bc.applyGhostBoundary(self.nodes.massDensity())
bc.applyGhostBoundary(self.nodes.specificThermalEnergy())
bc.applyGhostBoundary(self.nodes.velocity())
for bc in self.bcs:
bc.finalizeGhostBoundary()
self.H0 = self.nodes.Hfield()[0]
return
def tearDown(self):
del self.nodes
#===============================================================================
# Run the tests
#===============================================================================
if __name__ == "__main__":
unittest.main()
```
#### File: unit/FileIO/testSiloFileIO.py
```python
from Spheral import *
from FileIOTestBase import *
import os
import unittest
#-------------------------------------------------------------------------------
# SiloFileIO tests.
#-------------------------------------------------------------------------------
class SiloFileIOTest(FileIOTestBase, unittest.TestCase):
def setUp(self):
self.n = 10 # 1000
self.intmin = -2**24
self.intmax = 2**24
self.unsignedmin = 0
self.unsignedmax = 2**32
self.doublemin = -1e50
self.doublemax = 1e50
self.constructor = SiloFileIO
# Size the NodeLists.
nodes1d.numInternalNodes = self.n
nodes2d.numInternalNodes = self.n
nodes3d.numInternalNodes = self.n
return
def tearDown(self):
return
def removeFile(self, filename):
os.remove(filename + ".silo")
#---------------------------------------------------------------------------
# Compoundarray
#---------------------------------------------------------------------------
def testCompoundarray(self):
db = silo.DBCreate("TestCompoundarray.silo",
silo.DB_CLOBBER, silo.DB_LOCAL, "some file", silo.DB_HDF5)
thpt = vector_of_vector_of_int([vector_of_int(range(100)), vector_of_int(range(10))])
elemNames = vector_of_string(["range(100)", "range(10)"])
opts = silo.DBoptlist(1024)
assert opts.addOption(silo.DBOPT_CYCLE, 10) == 0
assert opts.addOption(silo.DBOPT_DTIME, 100.0) == 0
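# The option list attaches optional metadata (here the cycle number and
# simulation time) to the object written below; addOption returns 0 on success.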
assert silo.DBPutCompoundarray(db, "stuff", elemNames, thpt, opts) == 0
assert silo.DBClose(db) == 0
self.removeFile("TestCompoundarray")
return
#-------------------------------------------------------------------------------
# Run those tests.
#-------------------------------------------------------------------------------
if __name__ == "__main__":
#print "waiting..."
#raw_input()
unittest.main()
```
#### File: unit/Geometry/testEigen3d.py
```python
import unittest
from math import *
from SpheralTestUtilities import fuzzyEqual
from Spheral import *
# Create a global random number generator.
import random
random.seed(375)
rangen = random.Random()
ranrange = 1.0e8
#===============================================================================
# Compute an accuracy criterion based on how degenerate the eigen values are.
#===============================================================================
def degenerateFuzz(i, eigenvalues):
assert eigenvalues[0] <= eigenvalues[1] and eigenvalues[1] <= eigenvalues[2]
normalization = max([abs(x) for x in eigenvalues] + [1.0e-10])
if i == 0:
dx = abs(eigenvalues[1] - eigenvalues[0])/normalization
else:
dx = abs(eigenvalues[2] - eigenvalues[1])/normalization
assert dx >= 0.0
return max(1.0e-5, 1.0/(1.0 + 50.0*dx))
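# So nearly degenerate neighboring eigenvalues (dx -> 0) give a loose tolerance
# (-> 1.0), well-separated ones a tight one (e.g. ~0.02 at dx = 1), floored at 1.0e-5.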
#===============================================================================
# Generate a random 3x3 symmetric tensor with known eigen values and eigen
# vectors.
#===============================================================================
def randomSymTensor3d(lam1 = None,
lam2 = None,
lam3 = None):
if lam1 is None:
lam1 = rangen.uniform(-ranrange, ranrange)
if lam2 is None:
lam2 = rangen.uniform(-ranrange, ranrange)
if lam3 is None:
lam3 = rangen.uniform(-ranrange, ranrange)
# Pick random Euler angles.
theta = rangen.uniform(0.0, 2.0*pi)
phi = rangen.uniform(0.0, pi)
psi = rangen.uniform(0.0, pi)
# Build the rotation matrix of eigen vectors.
R = Tensor3d(cos(psi)*cos(phi) - cos(theta)*sin(phi)*sin(psi),
-sin(psi)*cos(phi) - cos(theta)*sin(phi)*cos(psi),
sin(theta)*sin(phi),
cos(psi)*sin(phi) + cos(theta)*cos(phi)*sin(psi),
-sin(psi)*sin(phi) + cos(theta)*cos(phi)*cos(psi),
-sin(theta)*cos(phi),
sin(theta)*sin(psi),
sin(theta)*cos(psi),
cos(theta))
assert fuzzyEqual(R.Determinant(), 1.0)
check = R*R.Transpose()
for i in xrange(3):
for j in xrange(3):
if i == j:
assert fuzzyEqual(check(i,j), 1.0)
else:
assert fuzzyEqual(check(i,j), 0.0)
# Check the eigen vectors.
vec1 = R.getColumn(0)
vec2 = R.getColumn(1)
vec3 = R.getColumn(2)
assert fuzzyEqual(vec1.magnitude(), 1.0)
assert fuzzyEqual(vec2.magnitude(), 1.0)
assert fuzzyEqual(vec3.magnitude(), 1.0)
assert fuzzyEqual(vec1.dot(vec2), 0.0)
assert fuzzyEqual(vec3.dot(vec1), 0.0)
assert fuzzyEqual(vec3.dot(vec2), 0.0)
# Now put it all together into our final symmetric matrix.
A = SymTensor3d(lam1, 0.0, 0.0,
0.0, lam2, 0.0,
0.0, 0.0, lam3)
A.rotationalTransform(R)
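# Assuming rotationalTransform applies the similarity transform A <- R*A*R^T,
# A now has eigenvalues (lam1, lam2, lam3) with the columns of R as its eigenvectors.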
# Return the tensor, its eigen values, and the tensor of eigenvectors.
return A, Vector3d(lam1, lam2, lam3), R
#===============================================================================
# Test class for Tensor3d.eigenValues and Tensor3d.eigenVectors
#===============================================================================
class TestEigenVectors(unittest.TestCase):
#---------------------------------------------------------------------------
# setUp
#---------------------------------------------------------------------------
def setUp(self):
self.ntests = 10000
return
#---------------------------------------------------------------------------
# eigenValues (random input)
#---------------------------------------------------------------------------
def testRandomEigenValues(self):
for i in xrange(self.ntests):
A, vlam0, vectors0 = randomSymTensor3d()
lam0 = [x for x in vlam0]
lam0.sort()
vlam = A.eigenValues()
lam = [x for x in vlam]
lam.sort()
for (x, x0) in zip(lam, lam0):
self.failUnless(fuzzyEqual(x, x0, 1e-5),
"Eigen values %s do not equal expected values %s" % (str(lam), str(lam0)))
return
#---------------------------------------------------------------------------
# eigenValues (two equal eigenvalues)
#---------------------------------------------------------------------------
def testDoublyDegenerateEigenValues(self):
for i in xrange(self.ntests):
lam12 = rangen.uniform(-ranrange, ranrange)
A, vlam0, vectors0 = randomSymTensor3d(lam1 = lam12,
lam2 = lam12)
lam0 = [x for x in vlam0]
lam0.sort()
vlam = A.eigenValues()
lam = [x for x in vlam]
lam.sort()
for (x, x0) in zip(lam, lam0):
self.failUnless(fuzzyEqual(x, x0, 1e-3),
"Eigen values %s do not equal expected values %s" % (str(lam), str(lam0)))
return
#---------------------------------------------------------------------------
# eigenValues (three equal eigenvalues)
#---------------------------------------------------------------------------
def testTriplyDegenerateEigenValues(self):
for i in xrange(self.ntests):
lam123 = rangen.uniform(-ranrange, ranrange)
A, vlam0, vectors0 = randomSymTensor3d(lam1 = lam123,
lam2 = lam123,
lam3 = lam123)
lam0 = [x for x in vlam0]
lam0.sort()
vlam = A.eigenValues()
lam = [x for x in vlam]
lam.sort()
for (x, x0) in zip(lam, lam0):
self.failUnless(fuzzyEqual(x, x0, 1e-5),
"Eigen values %s do not equal expected values %s" % (str(lam), str(lam0)))
return
#---------------------------------------------------------------------------
# eigenValues (diagonal matrix input)
#---------------------------------------------------------------------------
def testDiagonalEigenValues(self):
for i in xrange(self.ntests):
lam1 = rangen.uniform(-ranrange, ranrange)
lam2 = rangen.uniform(-ranrange, ranrange)
lam3 = rangen.uniform(-ranrange, ranrange)
A = SymTensor3d(lam1, 0.0, 0.0,
0.0, lam2, 0.0,
0.0, 0.0, lam3)
lam0 = [lam1, lam2, lam3]
lam0.sort()
vlam = A.eigenValues()
lam = [x for x in vlam]
lam.sort()
for (x, x0) in zip(lam, lam0):
self.failUnless(fuzzyEqual(x, x0, 1e-5),
"Eigen values %s do not equal expected values %s" % (str(lam), str(lam0)))
return
#---------------------------------------------------------------------------
# eigenVectors (random input)
#---------------------------------------------------------------------------
def testRandomEigenVectors(self):
for i in xrange(self.ntests):
A, vlam0, vectors0 = randomSymTensor3d()
lam0 = [(vlam0(i), vectors0.getColumn(i)) for i in range(3)]
lam0.sort()
eigenVecs0 = [x[1] for x in lam0]
eigenStruct = A.eigenVectors()
lam = [(eigenStruct.eigenValues(i), eigenStruct.eigenVectors.getColumn(i)) for i in range(3)]
lam.sort()
eigenVecs = [x[1] for x in lam]
for i in xrange(len(lam0)):
lami = lam0[i]
veci = eigenVecs[i]
vec0 = eigenVecs0[i]
self.failUnless(fuzzyEqual(veci.magnitude(), 1.0),
"Eigen vector %s does not have unit magnitude" % str(veci))
self.failUnless(fuzzyEqual(abs(veci.dot(vec0)), 1.0, degenerateFuzz(i, [x[0] for x in lam0])),
"Eigen vector %s does not equal expected value %s for eigen values %s" % (str(veci), str(vec0), str(vlam0)))
return
#---------------------------------------------------------------------------
# eigenVectors (two equal eigenvalues)
#---------------------------------------------------------------------------
def testDoublyDegenerateEigenVectors(self):
for i in xrange(self.ntests):
lam12 = rangen.uniform(-ranrange, ranrange)
A, vlam0, vectors0 = randomSymTensor3d(lam1 = lam12,
lam2 = lam12)
lam0 = [(vlam0(i), vectors0.getColumn(i)) for i in range(3)]
lam0.sort()
eigenVecs0 = [x[1] for x in lam0]
eigenStruct = A.eigenVectors()
lam = [(eigenStruct.eigenValues(i), eigenStruct.eigenVectors.getColumn(i)) for i in range(3)]
lam.sort()
eigenVecs = [x[1] for x in lam]
for x in eigenVecs:
self.failUnless(fuzzyEqual(x.magnitude(), 1.0),
"Eigen vector %s does not have unit magnitude %s" % (str(x), str(eigenStruct.eigenVectors)))
# Identify the unique eigen value.
unique = -1
thpt = 0.0
degenerate = []
if (lam0[0][0] == lam0[1][0]):
unique = 2
degenerate = [0, 1]
thpt = abs(vlam0(2))/(abs(vlam0(0)) + 1.0e-10)
else:
unique = 0
degenerate = [1, 2]
thpt = abs(vlam0(0))/(abs(vlam0(1)) + 1.0e-10)
assert thpt > 0.0
if thpt > 1.0:
thpt = 1.0/thpt
# Does the eigenvector for the unique eigen value match?
self.failUnless(fuzzyEqual(abs(eigenVecs[unique].dot(eigenVecs0[unique])), 1.0, degenerateFuzz(unique, [x[0] for x in lam0])),
"Eigen vector %s does not equal expected value %s for eigen values %s, %s" % (str(eigenVecs[unique]),
str(eigenVecs0[unique]),
str(vlam0),
str(eigenStruct.eigenValues)))
# The remaining eigen values need only be perpendicular to each other and the unique
# value.
self.failUnless(fuzzyEqual(eigenVecs[0].dot(eigenVecs[1]), 0.0) and
fuzzyEqual(eigenVecs[0].dot(eigenVecs[2]), 0.0) and
fuzzyEqual(eigenVecs[1].dot(eigenVecs[2]), 0.0),
"Eigen vectors (%s, %s, %s) are not orthogonal\n%s" % (str(eigenVecs[0]),
str(eigenVecs[1]),
str(eigenVecs[2]),
str(eigenStruct.eigenValues)))
return
#---------------------------------------------------------------------------
# eigenVectors (three equal eigenvalues)
#---------------------------------------------------------------------------
def testTriplyDegenerateEigenVectors(self):
for i in xrange(self.ntests):
lam123 = rangen.uniform(-ranrange, ranrange)
A = SymTensor3d(lam123, 0.0, 0.0,
0.0, lam123, 0.0,
0.0, 0.0, lam123)
vlam0 = Vector3d(lam123, lam123, lam123)
vectors0 = [Vector3d(1, 0, 0),
Vector3d(0, 1, 0),
Vector3d(0, 0, 1)]
eigenStruct = A.eigenVectors()
for i in xrange(3):
vec = eigenStruct.eigenVectors.getColumn(i)
match = [fuzzyEqual(abs(vec.dot(vectors0[j])), 1.0) for j in xrange(len(vectors0))]
assert len(match) == len(vectors0)
assert sum(match) == 1
del vectors0[match.index(True)]
self.failUnless(len(vectors0) == 0,
"Failed triply degenerate eigen vector decomposition: %s %s." %
(str(eigenStruct.eigenVectors), str(lam123)))
return
#---------------------------------------------------------------------------
# eigenVectors (diagonal matrix input)
#---------------------------------------------------------------------------
def testDiagonalEigenVectors(self):
for i in xrange(self.ntests):
lam1 = rangen.uniform(-ranrange, ranrange)
lam2 = rangen.uniform(-ranrange, ranrange)
lam3 = rangen.uniform(-ranrange, ranrange)
A = SymTensor3d(lam1, 0.0, 0.0,
0.0, lam2, 0.0,
0.0, 0.0, lam3)
lam0 = [(lam1, Vector3d(1, 0, 0)),
(lam2, Vector3d(0, 1, 0)),
(lam3, Vector3d(0, 0, 1))]
lam0.sort()
eigenVecs0 = [x[1] for x in lam0]
eigenStruct = A.eigenVectors()
lam = [(eigenStruct.eigenValues(i), eigenStruct.eigenVectors.getColumn(i)) for i in range(3)]
lam.sort()
eigenVecs = [x[1] for x in lam]
for (x, x0) in zip(eigenVecs, eigenVecs0):
self.failUnless(fuzzyEqual(x.magnitude(), 1.0),
"Eigen vector %s does not equal expected value %s" % (str(x), str(x0)))
self.failUnless(fuzzyEqual(abs(x.dot(x0)), 1.0),
"Eigen vector %s does not equal expected value %s" % (str(x), str(x0)))
return
if __name__ == "__main__":
unittest.main()
```
#### File: unit/Geometry/testInnerOuterProduct.py
```python
import unittest
from math import *
from SpheralTestUtilities import fuzzyEqual
# What dimensions are we testing?
from spheralDimensions import spheralDimensions
dims = spheralDimensions()
from Spheral import *
# Create a global random number generator.
import random
random.seed(710)
rangen = random.Random()
ranrange = (-1.0, 1.0)
# Choose a default overall tolerance for comparisons
tol = 1.0e-7
#===============================================================================
# Compare two tensor'ish types to some tolerance.
#===============================================================================
def isEqual(x, y,
tol = 1.0e-7):
if hasattr(x, "__getitem__"):
if len(x) != len(y):
return False
disc = sum([abs(xi - yi) for (xi, yi) in zip(x, y)])/len(x)
else:
disc = abs(x - y)
return disc < tol
#===============================================================================
# Generate a random geometric type.
#===============================================================================
def fillRandom(Constructor):
result = Constructor()
nelem = Constructor.numElements
for i in xrange(nelem):
result[i] = rangen.uniform(*ranrange)
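# SymTensor types must remain symmetric, so average with the transpose below.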
if "Sym" in Constructor.__name__:
result = 0.5*(result + result.Transpose())
return result
#===============================================================================
# Test class for inner product.
#===============================================================================
class TestInnerProduct(unittest.TestCase):
#---------------------------------------------------------------------------
# scalar . value
#---------------------------------------------------------------------------
def testScalarDotThing(self):
for typestring in ("Vector%id", "Tensor%id", "SymTensor%id", "ThirdRankTensor%id"):
for dim in dims:
ttype = eval(typestring % dim)
x = rangen.uniform(*ranrange)
y = fillRandom(ttype)
result = innerProduct(x, y)
answer = ttype()
for i in xrange(ttype.numElements):
answer[i] = x*y[i]
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch for %s: %s != %s" % (ttype.__name__, result, answer))
return
#---------------------------------------------------------------------------
# value . scalar
#---------------------------------------------------------------------------
def testThingDotScalar(self):
for typestring in ("Vector%id", "Tensor%id", "SymTensor%id", "ThirdRankTensor%id"):
for dim in dims:
ttype = eval(typestring % dim)
x = rangen.uniform(*ranrange)
y = fillRandom(ttype)
result = innerProduct(y, x)
answer = ttype()
for i in xrange(ttype.numElements):
answer[i] = x*y[i]
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch for %s: %s != %s" % (ttype.__name__, result, answer))
return
#---------------------------------------------------------------------------
# vector . vector
#---------------------------------------------------------------------------
def testVectorDotVector(self):
for dim in dims:
ttype = eval("Vector%id" % dim)
x = fillRandom(ttype)
y = fillRandom(ttype)
result = innerProduct(x, y)
answer = 0.0
for i in xrange(dim):
answer += x[i]*y[i]
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# tensor . vector
#---------------------------------------------------------------------------
def testTensorDotVector(self):
for typestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
vtype = eval("Vector%id" % dim)
ttype = eval(typestring % dim)
x = fillRandom(ttype)
y = fillRandom(vtype)
result = innerProduct(x, y)
answer = vtype()
for i in xrange(dim):
for j in xrange(dim):
answer[i] += x(i,j)*y(j)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# vector . tensor
#---------------------------------------------------------------------------
def testVectorDotTensor(self):
for typestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
vtype = eval("Vector%id" % dim)
ttype = eval(typestring % dim)
x = fillRandom(vtype)
y = fillRandom(ttype)
result = innerProduct(x, y)
answer = vtype()
for i in xrange(dim):
for j in xrange(dim):
answer[i] += x(j)*y(j,i)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# vector . tensor (alternate index bookkeeping; renamed so it does not
# shadow the identical test above)
#---------------------------------------------------------------------------
def testVectorDotTensor2(self):
for typestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
vtype = eval("Vector%id" % dim)
ttype = eval(typestring % dim)
x = fillRandom(vtype)
y = fillRandom(ttype)
result = innerProduct(x, y)
answer = vtype()
for i in xrange(dim):
for j in xrange(dim):
answer[j] += x(i)*y(i,j)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor . vector
#---------------------------------------------------------------------------
def testThirdRankTensorDotVector(self):
for dim in dims:
vtype = eval("Vector%id" % dim)
trttype = eval("ThirdRankTensor%id" % dim)
ttype = eval("Tensor%id" % dim)
x = fillRandom(trttype)
y = fillRandom(vtype)
result = innerProduct(x, y)
answer = ttype()
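# The inner product contracts the adjacent indices: (x . y)_ij = x_ijk * y_k.
# Rank-2 results are addressed linearly as answer[dim*i + j] below.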
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
answer[dim*i + j] += x(i,j,k)*y(k)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch for %s.%s: %s != %s" % (trttype.__name__, ttype.__name__, result, answer))
return
#---------------------------------------------------------------------------
# vector . thirdranktensor
#---------------------------------------------------------------------------
def testVectorDotThirdRankTensor(self):
for dim in dims:
vtype = eval("Vector%id" % dim)
trttype = eval("ThirdRankTensor%id" % dim)
ttype = eval("Tensor%id" % dim)
x = fillRandom(vtype)
y = fillRandom(trttype)
result = innerProduct(x, y)
answer = ttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
answer[dim*j + k] += x(i)*y(i,j,k)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch for %s.%s: %s != %s" % (vtype.__name__, trttype.__name__, result, answer))
return
#---------------------------------------------------------------------------
# tensor . tensor
#---------------------------------------------------------------------------
def testTensorDotTensor(self):
for t1typestring in ("Tensor%id", "SymTensor%id"):
for t2typestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
atype = eval("Tensor%id" % dim)
t1type = eval(t1typestring % dim)
t2type = eval(t2typestring % dim)
x = fillRandom(t1type)
y = fillRandom(t2type)
result = innerProduct(x, y)
answer = atype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
answer[i*dim + j] += x(i,k)*y(k,j)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch for %s.%s: %s != %s, max disc=%s" % (t1type.__name__, t2type.__name__, result, answer, (result - answer).maxAbsElement()))
return
#---------------------------------------------------------------------------
# thirdranktensor . tensor
#---------------------------------------------------------------------------
def testTensorDotThirdRankTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
trttype = eval("ThirdRankTensor%id" % dim)
ttype = eval(ttypestring % dim)
x = fillRandom(trttype)
y = fillRandom(ttype)
result = innerProduct(x, y)
answer = trttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(i, j, k) + x(i, j, m)*y(m, k)
answer(i, j, k, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# tensor . thirdranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDotTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
trttype = eval("ThirdRankTensor%id" % dim)
ttype = eval(ttypestring % dim)
x = fillRandom(ttype)
y = fillRandom(trttype)
result = innerProduct(x, y)
answer = trttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(i, j, k) + x(i, m)*y(m, j, k)
answer(i, j, k, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor . vector
#---------------------------------------------------------------------------
def testFourthRankTensorDotVector(self):
for dim in dims:
vtype = eval("Vector%id" % dim)
trttype = eval("ThirdRankTensor%id" % dim)
frttype = eval("FourthRankTensor%id" % dim)
x = fillRandom(frttype)
y = fillRandom(vtype)
result = innerProduct(x, y)
answer = trttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(i,j,k) + x(i,j,k,m)*y(m)
answer(i, j, k, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# vector . fourthranktensor
#---------------------------------------------------------------------------
def testVectorDotFourthRankTensor(self):
for dim in dims:
vtype = eval("Vector%id" % dim)
trttype = eval("ThirdRankTensor%id" % dim)
frttype = eval("FourthRankTensor%id" % dim)
x = fillRandom(vtype)
y = fillRandom(frttype)
result = innerProduct(x, y)
answer = trttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(i,j,k) + x(m)*y(m,i,j,k)
answer(i, j, k, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor . tensor
#---------------------------------------------------------------------------
def testFourthRankTensorDotTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
r2type = eval(ttypestring % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r4type)
y = fillRandom(r2type)
result = innerProduct(x, y)
answer = r4type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(i, j, k, n) + x(i, j, k, m)*y(m, n)
answer(i, j, k, n, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# tensor . fourthranktensor
#---------------------------------------------------------------------------
def testTensorDotFourthRankTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
r2type = eval(ttypestring % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r2type)
y = fillRandom(r4type)
result = innerProduct(x, y)
answer = r4type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(i, k, m, n) + x(i, j)*y(j, k, m, n)
answer(i, k, m, n, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor . thirdranktensor
#---------------------------------------------------------------------------
def testFourthRankTensorDotThirdRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
x = fillRandom(r4type)
y = fillRandom(r3type)
result = innerProduct(x, y)
answer = r5type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
for p in xrange(dim):
z = answer(i, j, k, n, p) + x(i, j, k, m)*y(m, n, p)
answer(i, j, k, n, p, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor . fourthranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDotFourthRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r4type)
result = innerProduct(x, y)
answer = r5type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
for p in xrange(dim):
z = answer(i, j, m, n, p) + x(i, j, k)*y(k, m, n, p)
answer(i, j, m, n, p, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor . thirdranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDotThirdRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r3type)
result = innerProduct(x, y)
answer = r4type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(i, j, m, n) + x(i, j, k)*y(k, m, n)
answer(i, j, m, n, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#===============================================================================
# Test class for outer product.
#===============================================================================
class TestOuterProduct(unittest.TestCase):
#---------------------------------------------------------------------------
# scalar x value
#---------------------------------------------------------------------------
def testScalarOuterThing(self):
for typestring in ("Vector%id", "Tensor%id", "SymTensor%id", "ThirdRankTensor%id"):
for dim in dims:
ttype = eval(typestring % dim)
x = rangen.uniform(*ranrange)
y = fillRandom(ttype)
result = outerProduct(x, y)
answer = ttype()
for i in xrange(ttype.numElements):
answer[i] = x*y[i]
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch for %s: %s != %s" % (ttype.__name__, result, answer))
return
#---------------------------------------------------------------------------
# value x scalar
#---------------------------------------------------------------------------
def testThingOuterScalar(self):
for typestring in ("Vector%id", "Tensor%id", "SymTensor%id", "ThirdRankTensor%id"):
for dim in dims:
ttype = eval(typestring % dim)
x = rangen.uniform(*ranrange)
y = fillRandom(ttype)
result = outerProduct(y, x)
answer = ttype()
for i in xrange(ttype.numElements):
answer[i] = x*y[i]
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch for %s: %s != %s" % (ttype.__name__, result, answer))
return
#---------------------------------------------------------------------------
# vector x vector
#---------------------------------------------------------------------------
def testVectorOuterVector(self):
for dim in dims:
type = eval("Vector%id" % dim)
ttype = eval("Tensor%id" % dim)
x = fillRandom(type)
y = fillRandom(type)
result = outerProduct(x, y)
answer = ttype()
for i in xrange(dim):
for j in xrange(dim):
answer(i, j, x[i]*y[j])
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# tensor x vector
#---------------------------------------------------------------------------
def testTensorOuterVector(self):
for typestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
vtype = eval("Vector%id" % dim)
ttype = eval(typestring % dim)
trttype = eval("ThirdRankTensor%id" % dim)
x = fillRandom(ttype)
y = fillRandom(vtype)
result = outerProduct(x, y)
answer = trttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
answer(i, j, k, x(i,j)*y(k))
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# vector x tensor
#---------------------------------------------------------------------------
def testVectorOuterTensor(self):
for typestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
vtype = eval("Vector%id" % dim)
ttype = eval(typestring % dim)
trttype = eval("ThirdRankTensor%id" % dim)
x = fillRandom(vtype)
y = fillRandom(ttype)
result = outerProduct(x, y)
answer = trttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
answer(i, j, k, x(i)*y(j,k))
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#===============================================================================
# Test class for double inner product.
#===============================================================================
class TestDoubleInnerProduct(unittest.TestCase):
#---------------------------------------------------------------------------
# tensor .. tensor
#---------------------------------------------------------------------------
def testTensorDoubleDotTensor(self):
for ttypestring1 in ("Tensor%id", "SymTensor%id"):
for ttypestring2 in ("Tensor%id", "SymTensor%id"):
for dim in dims:
t1type = eval(ttypestring1 % dim)
t2type = eval(ttypestring2 % dim)
x = fillRandom(t1type)
y = fillRandom(t2type)
result = innerDoubleProduct(x, y)
result2 = x.doubledot(y)
answer = 0.0
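# The double inner product contracts both adjacent index pairs: x : y = x_ij * y_ji.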
for i in xrange(dim):
for j in xrange(dim):
answer += x(i,j)*y(j,i)
self.failUnless(abs(result - answer) < 1.0e-10, "Mismatch: %s != %s" % (result, answer))
self.failUnless(abs(result2 - answer) < 1.0e-10, "Mismatch: %s != %s" % (result2, answer))
return
#---------------------------------------------------------------------------
# tensor .. thirdranktensor
#---------------------------------------------------------------------------
def testTensorDoubleDotThirdRankTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
vtype = eval("Vector%id" % dim)
r2type = eval(ttypestring % dim)
r3type = eval("ThirdRankTensor%id" % dim)
x = fillRandom(r2type)
y = fillRandom(r3type)
result = innerDoubleProduct(x, y)
answer = vtype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
answer[k] += x(i, j)*y(j, i, k)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor .. tensor
#---------------------------------------------------------------------------
def testThirdRankTensorDoubleDotTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
vtype = eval("Vector%id" % dim)
r2type = eval(ttypestring % dim)
r3type = eval("ThirdRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r2type)
result = innerDoubleProduct(x, y)
answer = vtype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
answer[i] += x(i, j, k)*y(k, j)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor .. thirdranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDoubleDotThirdRankTensor(self):
for dim in dims:
r2type = eval("Tensor%id" % dim)
r3type = eval("ThirdRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r3type)
result = innerDoubleProduct(x, y)
answer = r2type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(i,m) + x(i,j,k)*y(k,j,m)
answer(i,m,z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# tensor .. fourthranktensor
#---------------------------------------------------------------------------
def testTensorDoubleDotFourthRankTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
ttype = eval("Tensor%id" % dim)
r2type = eval(ttypestring % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r2type)
y = fillRandom(r4type)
result = innerDoubleProduct(x, y)
answer = ttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(k, m) + x(i, j)*y(j, i, k, m)
answer(k, m, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor .. tensor
#---------------------------------------------------------------------------
def testFourthRankTensorDoubleDotTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
ttype = eval("Tensor%id" % dim)
r2type = eval(ttypestring % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r4type)
y = fillRandom(r2type)
result = innerDoubleProduct(x, y)
answer = ttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(i, j) + x(i, j, k, m)*y(m, k)
answer(i, j, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor .. fourthranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDoubleDotFourthRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r4type)
result = innerDoubleProduct(x, y)
answer = r3type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(i, m, n) + x(i, j, k)*y(k, j, m, n)
answer(i, m, n, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor .. thirdranktensor
#---------------------------------------------------------------------------
def testFourthRankTensorDoubleDotThirdRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r4type)
y = fillRandom(r3type)
result = innerDoubleProduct(x, y)
answer = r3type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(i, j, n) + x(i, j, k, m)*y(m, k, n)
answer(i, j, n, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor .. fourthranktensor
#---------------------------------------------------------------------------
def testFourthRankTensorDoubleDotFourthRankTensor(self):
for dim in dims:
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r4type)
y = fillRandom(r4type)
result = innerDoubleProduct(x, y)
answer = r4type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
for p in xrange(dim):
z = answer(i, j, n, p) + x(i, j, k, m)*y(m, k, n, p)
answer(i, j, n, p, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# tensor .. fifthranktensor
#---------------------------------------------------------------------------
def testTensorDoubleDotFifthRankTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
r2type = eval(ttypestring % dim)
r3type = eval("ThirdRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
x = fillRandom(r2type)
y = fillRandom(r5type)
result = innerDoubleProduct(x, y)
answer = r3type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(k,m,n) + x(i,j)*y(j,i,k,m,n)
answer(k,m,n,z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fifthranktensor .. tensor
#---------------------------------------------------------------------------
def testFifthRankTensorDoubleDotTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
r2type = eval(ttypestring % dim)
r3type = eval("ThirdRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
x = fillRandom(r5type)
y = fillRandom(r2type)
result = innerDoubleProduct(x, y)
answer = r3type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(i,j,k) + x(i,j,k,m,n)*y(n,m)
answer(i,j,k,z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor .. fifthranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDoubleDotFifthRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r5type)
result = innerDoubleProduct(x, y)
answer = r4type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
for p in xrange(dim):
z = answer(i,m,n,p) + x(i,j,k)*y(k,j,m,n,p)
answer(i,m,n,p,z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fifthranktensor .. thirdranktensor
#---------------------------------------------------------------------------
def testFifthRankTensorDoubleDotThirdRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
x = fillRandom(r5type)
y = fillRandom(r3type)
result = innerDoubleProduct(x, y)
answer = r4type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
for p in xrange(dim):
z = answer(i,j,k,p) + x(i,j,k,m,n)*y(n,m,p)
answer(i,j,k,p,z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor .. fifthranktensor
#---------------------------------------------------------------------------
    def testFourthRankTensorDoubleDotFifthRankTensor(self):
for dim in dims:
r4type = eval("FourthRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
x = fillRandom(r4type)
y = fillRandom(r5type)
result = innerDoubleProduct(x, y)
answer = r5type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
for p in xrange(dim):
for q in xrange(dim):
z = answer(i,j,n,p,q) + x(i,j,k,m)*y(m,k,n,p,q)
answer(i,j,n,p,q,z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fifthranktensor .. fourthranktensor
#---------------------------------------------------------------------------
    def testFifthRankTensorDoubleDotFourthRankTensor(self):
for dim in dims:
r4type = eval("FourthRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
x = fillRandom(r5type)
y = fillRandom(r4type)
result = innerDoubleProduct(x, y)
answer = r5type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
for p in xrange(dim):
for q in xrange(dim):
z = answer(i,j,k,p,q) + x(i,j,k,m,n)*y(n,m,p,q)
answer(i,j,k,p,q,z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
if __name__ == "__main__":
unittest.main()
```
#### File: unit/Geometry/testVector.py
```python
from SpheralTestUtilities import fuzzyEqual
from math import *
import unittest
from Spheral import *
# Create a global random number generator.
import random
random.seed(889)
rangen = random.Random()
#-------------------------------------------------------------------------------
# Generic vector tests.
#-------------------------------------------------------------------------------
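# VectorTestBase is a mixin: it deliberately does not inherit from
# unittest.TestCase, so it is not collected on its own; the dimension-specific
# classes below combine it with unittest.TestCase.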
class VectorTestBase:
def testCopy(self):
v = self.VectorType(self.lhs)
for i in xrange(self.VectorType.nDimensions):
assert v(i) == self.lhs(i)
return
def testGetX(self):
assert self.lhs.x == 10.0
def testSetX(self):
check = rangen.uniform(-1e10, 1e10)
self.lhs.x = check
assert self.lhs.x == check
assert self.lhs(0) == check
def testZero(self):
self.lhs.Zero()
for i in xrange(self.VectorType.nDimensions):
assert self.lhs(i) == 0.0
return
def testNegative(self):
v = -(self.lhs)
for i in xrange(self.VectorType.nDimensions):
assert v(i) == -(self.lhs(i))
return
def testVectorAddition(self):
result = self.lhs + self.rhs
assert isinstance(result, self.VectorType)
for i in xrange(self.VectorType.nDimensions):
assert result(i) == self.lhs(i) + self.rhs(i)
return
def testVectorSubtraction(self):
result = self.lhs - self.rhs
assert isinstance(result, self.VectorType)
for i in xrange(self.VectorType.nDimensions):
assert result(i) == self.lhs(i) - self.rhs(i)
return
def testScalarMultiplication(self):
val = 44.0
result = self.lhs * val
assert isinstance(result, self.VectorType)
for i in xrange(self.VectorType.nDimensions):
assert result(i) == self.lhs(i) * val
return
def testScalarDivision(self):
val = 44.0
result = self.lhs / val
assert isinstance(result, self.VectorType)
for i in xrange(self.VectorType.nDimensions):
assert fuzzyEqual(result(i), self.lhs(i) / val)
return
def testInPlaceVectorAddition(self):
result = self.VectorType(self.lhs)
result += self.rhs
assert isinstance(result, self.VectorType)
for i in xrange(self.VectorType.nDimensions):
assert result(i) == self.lhs(i) + self.rhs(i)
return
def testInPlaceVectorSubtraction(self):
result = self.VectorType(self.lhs)
result -= self.rhs
assert isinstance(result, self.VectorType)
for i in xrange(self.VectorType.nDimensions):
assert result(i) == self.lhs(i) - self.rhs(i)
return
def testInPlaceScalarMultiplication(self):
val = 44.0
result = self.VectorType(self.lhs)
result *= val
assert isinstance(result, self.VectorType)
for i in xrange(self.VectorType.nDimensions):
assert result(i) == self.lhs(i) * val
return
def testInPlaceScalarDivision(self):
val = 44.0
result = self.VectorType(self.lhs)
result /= val
assert isinstance(result, self.VectorType)
for i in xrange(self.VectorType.nDimensions):
assert fuzzyEqual(result(i), self.lhs(i) / val)
return
def testEqual(self):
x = self.VectorType(self.lhs)
assert x == self.lhs
assert not (self.lhs == self.rhs)
return
def testNotEqual(self):
x = self.VectorType(self.lhs)
assert self.lhs != self.rhs
assert not (self.lhs != x)
return
def testLessThan(self):
assert self.rhs < self.lhs
assert not self.lhs < self.rhs
return
def testGreaterThan(self):
assert self.lhs > self.rhs
assert not self.rhs > self.lhs
return
def testLessThanOrEqual(self):
assert self.rhs <= self.lhs
assert self.lhs <= self.lhs
assert not self.lhs <= self.rhs
return
def testGreaterThanOrEqual(self):
assert self.lhs >= self.rhs
assert self.lhs >= self.lhs
assert not self.rhs >= self.lhs
return
def testDot(self):
result = self.lhs.dot(self.rhs)
check = 0.0
for i in xrange(self.VectorType.nDimensions):
check += self.lhs(i) * self.rhs(i)
assert result == check
def testSelfDyad(self):
result = self.lhs.selfdyad()
check = self.lhs.dyad(self.lhs)
assert isinstance(result, self.SymTensorType)
for i in xrange(self.VectorType.nDimensions):
for j in xrange(self.VectorType.nDimensions):
assert result(i,j) == check(i,j)
def testVectorMultiplication(self):
result = self.lhs * self.rhs
assert isinstance(result, self.TensorType)
check = self.lhs.dyad(self.rhs)
assert result == check
def testUnitVector(self):
result = self.lhs.unitVector()
assert isinstance(result, self.VectorType)
assert fuzzyEqual(result.magnitude(), 1.0)
assert fuzzyEqual(self.lhs.dot(result), self.lhs.magnitude())
def testMagnitude(self):
result = self.lhs.magnitude()
check = 0.0
for i in xrange(self.VectorType.nDimensions):
check += (self.lhs(i))**2
check = sqrt(check)
assert fuzzyEqual(result, check)
def testMagnitude2(self):
result = self.lhs.magnitude2()
check = 0.0
for i in xrange(self.VectorType.nDimensions):
check += (self.lhs(i))**2
assert fuzzyEqual(result, check)
def testMinElement(self):
result = self.lhs.minElement()
check = min([self.lhs(i) for i in range(self.VectorType.nDimensions)])
assert result == check
def testMaxElement(self):
result = self.lhs.maxElement()
check = max([self.lhs(i) for i in range(self.VectorType.nDimensions)])
assert result == check
def testSumElements(self):
result = self.lhs.sumElements()
check = sum([self.lhs(i) for i in range(self.VectorType.nDimensions)])
assert result == check
#-------------------------------------------------------------------------------
# 1-D
#-------------------------------------------------------------------------------
class Vector1dTest(VectorTestBase, unittest.TestCase):
def setUp(self):
self.VectorType = Vector1d
self.TensorType = Tensor1d
self.SymTensorType = SymTensor1d
self.lhs = Vector1d(10.0)
self.rhs = Vector1d(-1.0)
return
def tearDown(self):
return
def testCross(self):
result = self.lhs.cross(self.rhs)
assert isinstance(result, Vector3d)
assert ((result.x == 0.0) and
(result.y == 0.0) and
(result.z == 0.0))
def testDyad(self):
result = self.lhs.dyad(self.rhs)
assert isinstance(result, self.TensorType)
assert result.xx == self.lhs.x * self.rhs.x
#-------------------------------------------------------------------------------
# 2-D
#-------------------------------------------------------------------------------
class Vector2dTest(VectorTestBase, unittest.TestCase):
def setUp(self):
self.VectorType = Vector2d
self.TensorType = Tensor2d
self.SymTensorType = SymTensor2d
self.lhs = Vector2d(10.0, 431.0)
self.rhs = Vector2d(-1.0, -10.0)
return
def tearDown(self):
return
def testGetY(self):
assert self.lhs.y == 431.0
def testSetY(self):
check = rangen.uniform(-1e10, 1e10)
self.lhs.y = check
assert self.lhs.y == check
assert self.lhs(1) == check
def testCross(self):
result = self.lhs.cross(self.rhs)
assert isinstance(result, Vector3d)
check = Vector3d(0.0,
0.0,
self.lhs.x * self.rhs.y - self.lhs.y * self.rhs.x)
self.failUnless(result == check,
"cross product failure: %s != %s" % (str(result), str(check)))
def testDyad(self):
result = self.lhs.dyad(self.rhs)
assert isinstance(result, self.TensorType)
assert result == self.TensorType(self.lhs.x * self.rhs.x,
self.lhs.x * self.rhs.y,
self.lhs.y * self.rhs.x,
self.lhs.y * self.rhs.y)
#-------------------------------------------------------------------------------
# 3-D
#-------------------------------------------------------------------------------
class Vector3dTest(VectorTestBase, unittest.TestCase):
def setUp(self):
self.VectorType = Vector3d
self.TensorType = Tensor3d
self.SymTensorType = SymTensor3d
self.lhs = Vector3d(10.0, 431.0, 945.5)
self.rhs = Vector3d(-1.0, -10.0, -208.0)
return
def tearDown(self):
return
def testGetY(self):
assert self.lhs.y == 431.0
def testSetY(self):
check = rangen.uniform(-1e10, 1e10)
self.lhs.y = check
assert self.lhs.y == check
assert self.lhs(1) == check
def testGetZ(self):
assert self.lhs.z == 945.5
    def testSetZ(self):
check = rangen.uniform(-1e10, 1e10)
self.lhs.z = check
assert self.lhs.z == check
assert self.lhs(2) == check
def testCross(self):
result = self.lhs.cross(self.rhs)
assert isinstance(result, Vector3d)
check = Vector3d(self.lhs.y * self.rhs.z - self.lhs.z * self.rhs.y,
self.lhs.z * self.rhs.x - self.lhs.x * self.rhs.z,
self.lhs.x * self.rhs.y - self.lhs.y * self.rhs.x)
self.failUnless(result == check,
"cross product failure: %s != %s" % (str(result), str(check)))
def testDyad(self):
result = self.lhs.dyad(self.rhs)
assert isinstance(result, self.TensorType)
assert result == self.TensorType(self.lhs.x * self.rhs.x,
self.lhs.x * self.rhs.y,
self.lhs.x * self.rhs.z,
self.lhs.y * self.rhs.x,
self.lhs.y * self.rhs.y,
self.lhs.y * self.rhs.z,
self.lhs.z * self.rhs.x,
self.lhs.z * self.rhs.y,
self.lhs.z * self.rhs.z)
#-------------------------------------------------------------------------------
# Run those tests.
#-------------------------------------------------------------------------------
if __name__ == "__main__":
unittest.main()
```
#### File: unit/Hydro/testVoronoiHourglassControl1d.py
```python
from Spheral1d import *
from SpheralTestUtilities import *
from generateMesh import generateLineMesh
import numpy
import Gnuplot
#-------------------------------------------------------------------------------
# Command line parameters.
#-------------------------------------------------------------------------------
commandLine(nx = 100,
rho0 = 1.0,
rhoSlope = 1.0,
x0 = 0.0,
x1 = 1.0,
nPerh = 2.01,
gammaGas = 5.0/3.0,
mu = 1.0,
hmin = 1e-10,
hmax = 1.0,
hourglassOrder = 1,
hourglassLimiter = 1,
IntegratorConstructor = CheapSynchronousRK2Integrator,
steps = None,
goalTime = 0.15,
dt = 1e-4,
dtMin = 1.0e-5,
dtMax = 0.1,
dtGrowth = 2.0,
rigorousBoundaries = False,
maxSteps = None,
statsStep = 10,
HEvolution = IdealH,
iterations = 10,
graphics = True,
)
#-------------------------------------------------------------------------------
# Material.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gammaGas, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel(BSplineKernel(), 1000)
#-------------------------------------------------------------------------------
# Make the NodeLists.
#-------------------------------------------------------------------------------
nodes = makeFluidNodeList("nodes1", eos,
hmin = hmin,
hmax = hmax,
nPerh = nPerh)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
from DistributeNodes import distributeNodesInRange1d
distributeNodesInRange1d([(nodes, [(nx, rho0, (x0, x1))])])
output("nodes.numNodes")
def setRho():
pos = nodes.positions()
rho = nodes.massDensity()
for i in xrange(nodes.numInternalNodes):
rho[i] = rho0 + rhoSlope*pos[i].x
return
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
output("db.appendNodeList(nodes)")
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct an hourglass control object.
#-------------------------------------------------------------------------------
hg = VoronoiHourglassControl(WT, hourglassOrder, hourglassLimiter)
output("hg")
output("hg.order")
output("hg.limiter")
packages = vector_of_Physics()
packages.append(hg)
#-------------------------------------------------------------------------------
# Create boundary conditions.
#-------------------------------------------------------------------------------
xPlane0 = Plane(Vector(x0), Vector( 1.0))
xPlane1 = Plane(Vector(x1), Vector(-1.0))
xbc0 = ReflectingBoundary(xPlane0)
xbc1 = ReflectingBoundary(xPlane1)
hg.appendBoundary(xbc0)
hg.appendBoundary(xbc1)
#-------------------------------------------------------------------------------
# Iteratively let the hourglass control adjust the point positions, and see
# where we converge.
#-------------------------------------------------------------------------------
mass = nodes.mass()
pos = nodes.positions()
H = nodes.Hfield()
rho = nodes.massDensity()
def plotRho(p):
xarray = numpy.array([x.x for x in pos.internalValues()])
rhoarray = numpy.array([x for x in rho.internalValues()])
d = Gnuplot.Data(xarray, rhoarray, with_="linesp", title="iteration %i" % iter, inline=True)
p.replot(d)
def plotDx(p):
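    # Plot each zone's volume from the line mesh, which measures the local
    # node spacing.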
mesh, void = generateLineMesh([nodes], Vector(x0), Vector(x1), False, False, False)
xarray = numpy.array([x.x for x in pos.internalValues()])
n = len(xarray)
dxarray = numpy.array([float(i) for i in xrange(n)])
for i in xrange(n):
dxarray[i] = mesh.zone(i).volume()
d = Gnuplot.Data(xarray, dxarray, with_="linesp", title="iteration %i" % iter, inline=True)
p.replot(d)
if graphics:
p0, p1, p2 = Gnuplot.Gnuplot(), Gnuplot.Gnuplot(), Gnuplot.Gnuplot()
p0.title("Forced density profile")
p1.title("Summed density profile")
p2.title("Delta x")
for iter in xrange(iterations):
setRho()
if graphics:
plotRho(p0)
state = State()
for f in (pos, mass, rho, H):
state.enroll(f)
derivs = StateDerivatives(db, packages)
hg.registerState(db, state)
hg.registerDerivatives(db, derivs)
state.update(derivs, 1.0, 0.0, 1.0)
hg.finalize(0.0, 0.0, db, state, derivs)
if graphics:
db.updateConnectivityMap()
cm = db.connectivityMap()
posfl = state.vectorFields(HydroFieldNames.position)
massfl = state.scalarFields(HydroFieldNames.mass)
Hfl = state.symTensorFields(HydroFieldNames.H)
rhofl = state.scalarFields(HydroFieldNames.massDensity)
computeSPHSumMassDensity(cm, WT, posfl, massfl, Hfl, rhofl)
plotRho(p1)
plotDx(p2)
```
#### File: unit/Integrator/TestSynchronousRK2.py
```python
from Spheral import *
from SpheralTestUtilities import *
################################################################################
def plotState(nodes, color='black', plotGhosts=0):
from SpheralGistUtilities import *
from gist import *
if plotGhosts:
nx = nodes.numNodes
else:
nx = nodes.numInternalNodes
window(0)
xNodes = nodePositions1d(nodes)[:nx]
rhoNodes = array(nodes.massDensity[:nx])
plg(rhoNodes, xNodes, color=color)
pltitle('Mass density')
window(1)
vNodes = array([0.0]*nx)
for i in xrange(nx):
vNodes[i] = nodes.velocity[i].x
plg(vNodes, xNodes, color=color)
pltitle('Velocity')
window(2)
pressure = nodes.pressure
PNodes = array(pressure[:nx])
plg(PNodes, xNodes, color=color)
pltitle('Pressure')
window(3)
HNodes = array([0.0]*nx)
for i in xrange(nx):
HNodes[i] = 1.0/nodes.Hfield[i].xx
plg(HNodes, xNodes, color=color)
pltitle('Smoothing scale')
################################################################################
# Generic problem parameters
nx1, nx2 = 50, 50
rho1, rho2 = 1.0, 1.0
m1, m2 = 0.5*rho1/nx1, 0.5*rho2/nx2
P1, P2 = 1.0, 1.0
x0, x1, x2 = -0.5, 0.0, 0.5
gamma = 1.4
mu = 1.0
Cl = 0.75
Cq = 1.5
epsilon2 = 1e-2
HsmoothMin, HsmoothMax = 0.0001, 0.1
cfl = 0.1
neighborSearchType = 3 # GatherScatter
numGridLevels = 10
topGridCellSize = 0.25
origin = Vector1d(0.0)
goalTime = 0.5
dtMin, dtMax = 1e-5, 0.1
dt = 0.0001
maxSteps = 500
smoothIters = 0
sumForMassDensity = 0
################################################################################
title('1-D integrated hydro test -- planar Sod problem')
nx = nx1 + nx2
eos = GammaLawGasMKS1d(gamma, mu)
nodes1 = SphNodeList1d(nx1 + nx2, eos)
output('nodes1.numNodes')
W = BSplineKernel1d()
#W = W4SplineKernel1d()
#W = GaussianKernel1d()
#W = SuperGaussianKernel1d()
#W = PiGaussianKernel1d(1.0)
output('W')
kernelExtent = W.kernelExtent
# Set node positions
dx1 = (x1 - x0)/nx1
for ix in xrange(nx1):
nodeID = ix
nodes1.positions[nodeID] = x0 + (ix + 0.5)*dx1
dx2 = (x2 - x1)/nx2
for ix in xrange(nx2):
nodeID = ix + nx1
nodes1.positions[nodeID] = x1 + (ix + 0.5)*dx2
# Set node masses
nodes1.mass[:nx1] = [m1]*nx1
nodes1.mass[nx1:] = [m2]*nx2
# Set node specific thermal energies
eps1 = P1/((gamma - 1.0)*rho1)
for nodeID in xrange(nx1):
nodes1.specificThermalEnergy[nodeID] = eps1
eps2 = P2/((gamma - 1.0)*rho2)
for nodeID in xrange(nx1, nx1 + nx2):
nodes1.specificThermalEnergy[nodeID] = eps2
# Set node velocities
for nodeID in xrange(nodes1.numNodes):
    nodes1.velocity[nodeID] = Vector1d(0.0)
# Set the smoothing scales.
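# H is stored as the inverse smoothing scale, so h = 1/(2.01*dx) corresponds
# to roughly two nodes per smoothing length.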
h1 = 1.0/(2.01*dx1)
h2 = 1.0/(2.01*dx2)
for i in xrange(nx1):
nodes1.Hfield[i].xx = h1
for i in xrange(nx1, nx1 + nx2):
nodes1.Hfield[i].xx = h2
# Set the mass densities if required.
nodes1.massDensity[:nx1] = [rho1]*nx1
nodes1.massDensity[nx1:] = [rho2]*nx2
# Construct the neighbor object and associate it with the node list.
neighborTimer = SpheralTimer('Neighbor initialization.')
neighborTimer.start()
neighbor1 = NestedGridNeighbor1d(nodes1,
neighborSearchType,
numGridLevels,
topGridCellSize,
origin,
kernelExtent)
nodes1.neighbor = neighbor1
neighborTimer.stop()
neighborTimer.printStatus()
# Create boundary conditions. We need at least this much to create the initial
# mass density field.
xPlane0 = Plane1d(Vector1d(x0), Vector1d( 1.0))
xPlane1 = Plane1d(Vector1d(x2), Vector1d(-1.0))
xbc0 = ReflectingBoundary1d(xPlane0)
xbc1 = ReflectingBoundary1d(xPlane1)
# Construct a DataBase to hold our node list
db = DataBase1d()
output('db')
output('db.appendNodeList(nodes1)')
output('db.numNodeLists')
output('db.numFluidNodeLists')
# Construct a standard Monaghan-Gingold artificial viscosity.
q = MonaghanGingoldViscosity1d(Cl, Cq)
q.epsilon2 = epsilon2
output('q')
output('q.epsilon2')
# Construct the hydro physics object.
hydro = FakeHydro1d(q)
hydro.cfl = cfl
output('hydro')
output('hydro.valid')
output('hydro.cfl')
# Construct a synchronous RK2 integrator, and add the one physics package and
# boundary condition.
integrator = CheapSynchronousRK2Integrator1d(db)
output('integrator')
integrator.appendPhysicsPackage(hydro)
integrator.appendBoundary(xbc0)
integrator.appendBoundary(xbc1)
output('integrator.havePhysicsPackage(hydro)')
output('integrator.haveBoundary(xbc0)')
output('integrator.haveBoundary(xbc1)')
output('integrator.valid')
#output('integrator.initialize()')
integrator.HsmoothMin = HsmoothMin
integrator.HsmoothMax = HsmoothMax
output('integrator.HsmoothMin')
output('integrator.HsmoothMax')
integrator.lastDt = dt
output('integrator.lastDt')
integrator.dtMin = dtMin
output('integrator.dtMin')
integrator.dtMax = dtMax
output('integrator.dtMax')
integrator.sumForMassDensity = sumForMassDensity
output('integrator.sumForMassDensity')
control = SpheralController(integrator, W, boundaryConditions=[xbc0, xbc1])
output('control')
# Smooth the initial conditions.
control.smoothState(smoothIters)
##################################################################################
# Plot the initial conditions
plotState(nodes1)
# Advance to the end time.
control.advance(goalTime, maxSteps)
# Plot the final state.
plotState(nodes1, 'blue')
```
#### File: unit/Kernel/CompareKernels.py
```python
from Spheral import *
from SpheralTestUtilities import *
import Gnuplot
import numpy
from SpheralGnuPlotUtilities import *
################################################################################
def plotW(plot, W, xmin=0.0, xmax=2.0, numPnts=200, Hdet=1.0, title='',
lineTitle=''):
dx = (xmax - xmin)/(numPnts - 1)
x = numpy.array(range(numPnts))
y = numpy.array([0.0]*numPnts)
i1,i2,i3 = 0,0,0
x = dx*x + xmin
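    # Accumulate trapezoidal estimates of the zeroth, first, and second
    # radial moments of |W| as we tabulate the kernel.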
for i in xrange(numPnts):
y[i] = W(x[i], Hdet)
if (i>0):
i1 += abs(y[i]+y[i-1])/2.0 * dx
i2 += abs(y[i]+y[i-1])/2.0 * dx * x[i]
i3 += abs(y[i]+y[i-1])/2.0 * dx * x[i]*x[i]
Pi = 3.14159
print "{0:3.3f} {1:3.3f} {2:3.3f}".format(i1*2.0,i2*2.0,i3)
plot('set xrange [0:1]')
plot.xlabel('eta')
plot.ylabel('W')
if title:
plot.title(title)
ymax = 0
for i in xrange(numPnts):
if abs(y[i]) > ymax:
ymax = abs(y[i])
data = Gnuplot.Data(x/xmax, y/ymax, with_='lines', title=lineTitle)
plot.replot(data)
plot('set xrange[0:1]')
return
import sys, string
kernels = map(string.lower, sys.argv[1:])
print kernels
numPts = 51
dx = 1.0/(numPts - 1)
################################################################################
numPoints = 100
kernelDict = {'spline': [BSplineKernel2d()],
'w4spline': [W4SplineKernel2d()],
'wendlandc4': [WendlandC4Kernel2d()],
'wendlandc6': [WendlandC6Kernel2d()],
'expinv': [ExpInvKernel1d(),
ExpInvKernel2d(),
ExpInvKernel3d()],
'quartic': [QuarticSplineKernel3d()],
## 'gauss': [GaussianKernel1d(3),
## GaussianKernel2d(3),
## GaussianKernel3d(3)],
## 'supergauss': [SuperGaussianKernel1d(),
## SuperGaussianKernel2d(),
## SuperGaussianKernel3d()],
## 'pigauss': [PiGaussianKernel1d(1.0),
## PiGaussianKernel2d(1.0),
## PiGaussianKernel3d(1.0)],
## 'sinc': [SincKernel1d(2),
## SincKernel2d(2),
## SincKernel3d(2)],
## 'poly1': [NSincPolynomialKernel1d(1),
## NSincPolynomialKernel2d(1),
## NSincPolynomialKernel3d(1)],
## 'poly3': [NSincPolynomialKernel1d(3),
## NSincPolynomialKernel2d(3)],
## 'poly5': [NSincPolynomialKernel1d(5),
## NSincPolynomialKernel2d(5)],
## 'poly7': [NSincPolynomialKernel1d(7),
## NSincPolynomialKernel2d(7)],
'spline3': [NBSplineKernel2d(3)],
'spline5': [NBSplineKernel2d(5)],
'spline7': [NBSplineKernel2d(7)],
'spline9': [NBSplineKernel2d(9)],
## 'spline11': [NBSplineKernel1d(11),
## NBSplineKernel2d(11),
## NBSplineKernel3d(11)],
}
titleDict = {'spline': 'B Spline Kernel',
'h': 'H kernel',
'h10': 'H kernel (extent = 10)',
'quartic': 'Quartic Spline Kernel',
'w4spline': 'W4 Spline Kernel',
'gauss': 'Gaussian Kernel',
'supergauss': 'SuperGaussian Kernel',
'pigauss': 'Pi Gaussian Kernel',
'sinc': 'Sinc Kernel',
'poly1': 'Linear Polynomial Sinc approx Kernel',
'poly3': 'Cubic Polynomial Sinc approx Kernel',
'poly5': 'Quintic Polynomial Sinc approx Kernel',
'poly7': 'Septic Polynomial Sinc approx Kernel',
'spline3': '3rd order b spline Kernel',
'spline5': '5th order b spline Kernel',
'spline7': '7th order b spline Kernel',
'spline9': '9th order b spline Kernel',
'spline11': '11th order b spline Kernel',
'wendlandc4': 'Wendland C4 Kernel',
'wendlandc6': 'Wendland C6 Kernel',
'expinv' : 'Exponential inverse Kernel',
}
plots = []
for i in xrange(2):
plots.append(generateNewGnuPlot())
for kernel in kernels:
title(titleDict[kernel])
for W in kernelDict[kernel]:
# Build a tabular version of the kernel
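        # The repr of W ends in the dimension suffix ("1d", "2d", "3d"), which
        # selects the matching TableKernel constructor here.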
WT = eval('TableKernel' + str(W).split()[0][-2:] + '(W, numPoints)')
output("WT")
output("WT.volumeNormalization")
output("WT.kernelExtent")
plotW(plots[-1], WT.kernelValue, 0.0, W.kernelExtent,
title='Kernels',
lineTitle = titleDict[kernel])
plotW(plots[-2], WT.gradValue, 0.0, W.kernelExtent,
title = 'Kernel Gradients',
lineTitle = titleDict[kernel])
```
#### File: unit/Kernel/kernelIntegrals.py
```python
from math import *
from Spheral import *
class Wintegral(ScalarFunctor):
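    # Integrand for the radial kernel moments: W (or |dW/deta|) times the
    # half-space shell measure in each dimension (1, pi*x, or 2*pi*x**2).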
def __init__(self, W, ndim, useGradientAsKernel):
assert ndim in (1, 2, 3)
self.W = W
self.ndim = ndim
self.useGradientAsKernel = useGradientAsKernel
ScalarFunctor.__init__(self)
return
def __call__(self, x):
        if self.useGradientAsKernel:
            result = abs(self.W.gradValue(x, 1.0))
        else:
            result = self.W.kernelValue(x, 1.0)
if self.ndim == 1:
return result
elif self.ndim == 2:
return pi*x*result
else:
return 2.0*pi*x*x*result
nperh = 2.0
deta = 1.0/nperh
neta = 5
etas1d, etas2d, etas3d = [], [], []
for ix in xrange(neta):
etas1d.append(Vector1d((ix + 0.5)*deta))
for iy in xrange(-neta + 1, neta):
etas2d.append(Vector2d((ix + 0.5)*deta, (iy + 0.5)*deta))
for iz in xrange(-neta + 1, neta):
etas3d.append(Vector3d((ix + 0.5)*deta, (iy + 0.5)*deta, (iz + 0.5)*deta))
for (W, ndim, etas, zero) in ((TableKernel1d(BSplineKernel1d(), 1000), 1, etas1d, Vector1d.zero),
(TableKernel2d(BSplineKernel2d(), 1000), 2, etas2d, Vector2d.zero),
(TableKernel3d(BSplineKernel3d(), 1000), 3, etas3d, Vector3d.zero)):
result = simpsonsIntegrationDouble(Wintegral(W, ndim, True), 0.0, W.kernelExtent, 1000)
print "Expected half zeroth moment in %i dimensions: %g" % (ndim, result)
Wsum = 0.0
W1sum = zero
for eta in etas:
Wi = abs(W.gradValue(eta.magnitude(), 1.0))
Wsum += Wi
W1sum += Wi*eta
W1sum /= Wsum
print "Result of summing W: ", Wsum, Wsum**(1.0/ndim), W1sum.magnitude() # , (Wsum/W.volumeNormalization)**(1.0/ndim), Wsum**(1.0/ndim)/W.volumeNormalization
```
#### File: unit/Mesh/testPolygonalMesh.py
```python
from math import *
import unittest
import time
from Spheral2d import *
from generateMesh import *
from SpheralTestUtilities import fuzzyEqual, testParallelConsistency
from SpheralGnuPlotUtilities import *
#===============================================================================
# Load mpi, and figure out how many domains to set up, and which domain we are.
#===============================================================================
import mpi
rank = mpi.rank
numDomains = mpi.procs
nxproc = int(sqrt(numDomains))
assert nxproc*nxproc == numDomains
#===============================================================================
# Create a global random number generator.
#===============================================================================
import random
rangen = random.Random()
#===============================================================================
# Some boundary conditions.
#===============================================================================
x0, x1 = 0.0, 1.0
y0, y1 = 0.0, 1.0
nx, ny = 16*nxproc, 16*nxproc
n = nx*ny
nperdomain = n / numDomains
nxcell = KeyTraits.maxKey1d/4
nycell = nxcell
assert nx < nxcell
ncell = nxcell*nycell
dxcell = (x1 - x0)/nxcell
dycell = (y1 - y0)/nycell
xmin = Vector(x0, y0)
xmax = Vector(x1, y1)
xbc0 = ReflectingBoundary(Plane(Vector(x0, y0), Vector(1.0, 0.0)))
ybc0 = ReflectingBoundary(Plane(Vector(x0, y0), Vector(0.0, 1.0)))
bclist = [xbc0, ybc0]
if numDomains > 1:
bclist.append(BoundingVolumeDistributedBoundary.instance())
bclist = []
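# NOTE: bclist is reset to empty here, so the boundary conditions constructed
# above are not actually applied in these tests.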
#===============================================================================
# Iterate the H's to something reasonable.
#===============================================================================
def iterateThoseHs(nodes):
db = DataBase()
db.appendNodeList(nodes)
for bc in bclist:
bc.setAllGhostNodes(db)
nodes.neighbor().updateNodes()
for bc in bclist:
bc.finalizeGhostBoundary()
nodes.neighbor().updateNodes()
vecbound = vector_of_Boundary()
for bc in bclist:
vecbound.append(bc)
WT = TableKernel(BSplineKernel(), 1000)
smooth = SPHSmoothingScale()
iterateIdealH(db, vecbound, WT, smooth,
tolerance = 1.0e-4)
return
from SpheralGnuPlotUtilities import *
p = None
#===============================================================================
# A counter to help in creating unique NodeList names.
#===============================================================================
itest = 0
#===============================================================================
# Test class for tests to apply to all meshes.
#===============================================================================
class PolygonalMeshGenericTests:
#---------------------------------------------------------------------------
# Test numbers of elements.
#---------------------------------------------------------------------------
def testPolygonalMeshNums0(self):
t0 = time.clock()
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax,
generateVoid = False,
generateParallelConnectivity = False)
print "Required %f seconds to generate mesh" % (time.clock() - t0)
assert mesh.numZones == self.nodes.numInternalNodes
## p = plotPolygonalMesh(mesh, persist=True)
## p("set xrange [-0.1:1.1]; set yrange [-0.1:1.1]; set size square"); p.refresh()
## d = Gnuplot.Data([self.pos[i].x for i in xrange(self.nodes.numInternalNodes)],
## [self.pos[i].y for i in xrange(self.nodes.numInternalNodes)],
## with_ = "points"
## )
## p.replot(d)
#---------------------------------------------------------------------------
# Test element IDs.
#---------------------------------------------------------------------------
def testPolygonalMeshElementIDs(self):
t0 = time.clock()
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax,
generateVoid = False,
generateParallelConnectivity = False)
print "Required %f seconds to generate mesh" % (time.clock() - t0)
for i in xrange(self.nodes.numInternalNodes):
node = mesh.node(i)
assert node.ID == i
for i in xrange(mesh.numEdges):
edge = mesh.edge(i)
assert edge.ID == i
for i in xrange(mesh.numFaces):
face = mesh.face(i)
assert face.ID == i
for i in xrange(mesh.numZones):
zone = mesh.zone(i)
assert zone.ID == i
return
#---------------------------------------------------------------------------
# Test that the zones in the mesh correspond to the correct seed nodes.
#---------------------------------------------------------------------------
def testPolygonalMeshZoneOrder(self):
t0 = time.clock()
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax,
generateVoid = False,
generateParallelConnectivity = False)
print "Required %f seconds to generate mesh" % (time.clock() - t0)
assert mesh.numZones >= self.nodes.numInternalNodes
for i in xrange(self.nodes.numInternalNodes):
zonehull = mesh.zone(i).convexHull()
self.failUnless(zonehull.contains(self.pos[i]), "Failing generator containment: %i %s" % (i, self.pos[i]))
return
#---------------------------------------------------------------------------
# Test the minimum scale.
#---------------------------------------------------------------------------
def testPolygonalMeshMinimumScale(self):
t0 = time.clock()
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax)
print "Required %f seconds to generate mesh" % (time.clock() - t0)
self.failUnless(mesh.minimumScale <= self.dxmin,
"Scales don't match: %g %g" % (mesh.minimumScale, self.dxmin))
return
#---------------------------------------------------------------------------
# Test the parallel domain info.
#---------------------------------------------------------------------------
def testPolygonalMeshParallel(self):
t0 = time.clock()
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax,
generateParallelConnectivity = True)
print "Required %f seconds to generate mesh" % (time.clock() - t0)
## p = plotPolygonalMesh(mesh, persist=True)
## p("set xrange [-0.1:1.1]; set yrange [-0.1:1.1]; set size square"); p.refresh()
## d = Gnuplot.Data([self.pos[i].x for i in xrange(self.nodes.numInternalNodes)],
## [self.pos[i].y for i in xrange(self.nodes.numInternalNodes)],
## with_ = "points"
## )
## p.replot(d)
msg = testParallelConsistency(mesh, xmin, xmax)
self.failUnless(msg == "ok", msg)
#---------------------------------------------------------------------------
# Test the mesh coordinates hash uniquely.
#---------------------------------------------------------------------------
def testPolygonalMeshHash(self):
t0 = time.clock()
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax)
print "Required %f seconds to generate mesh" % (time.clock() - t0)
pos = [mesh.zone(i).position() for i in xrange(mesh.numZones)] + [mesh.node(i).position() for i in xrange(mesh.numNodes)]
boxInv = xmax - xmin
boxInv = Vector(1.0/boxInv.x, 1.0/boxInv.y)
hashes = [hashPosition(x, xmin, xmax, boxInv) for x in pos]
blarg = zip(hashes, pos)
blarg.sort()
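        # Sorting the (hash, position) pairs makes any colliding hashes
        # adjacent, so one linear scan detects duplicates.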
for i in xrange(len(blarg) - 1):
hash0 = blarg[i][0]
hash1 = blarg[i+1][0]
self.failIf(hash0 == hash1,
"%i: Non-unique hash: %i %i %s %s %s %s %g" % (mpi.rank, i, mesh.numZones, str(hash0), str(hash1), str(blarg[i][1]), str(blarg[i+1][1]), (blarg[i][1] - blarg[i+1][1]).magnitude()))
return
#---------------------------------------------------------------------------
# Test the zones of the nodes.
#---------------------------------------------------------------------------
def testPolygonalMeshNodeZones(self):
t0 = time.clock()
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax)
print "Required %f seconds to generate mesh" % (time.clock() - t0)
answer = {}
for inode in xrange(mesh.numNodes):
answer[inode] = set()
for izone in xrange(mesh.numZones):
nodeIDs = mesh.zone(izone).nodeIDs
for inode in nodeIDs:
answer[inode].add(izone)
for inode in xrange(mesh.numNodes):
zoneIDs = mesh.node(inode).zoneIDs
for izone in zoneIDs:
self.failUnless(izone in answer[inode] or izone == PolygonalMesh.UNSETID,
"Missing zone %i for set in node %i: %s %s" %
(izone, inode, [x for x in zoneIDs], answer[inode]))
#---------------------------------------------------------------------------
# Test consistency of zone adjacency via node connection.
#---------------------------------------------------------------------------
def testPolygonalZoneAdjacency(self):
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax)
for izone in xrange(mesh.numZones):
nodeIDs = mesh.zone(izone).nodeIDs
for inode in nodeIDs:
self.failUnless(izone in mesh.node(inode).zoneIDs,
"Missing zone %i in neighbors for node %i : %s" % (izone, inode, list(mesh.node(inode).zoneIDs)))
#---------------------------------------------------------------------------
# Test the opposite zones across faces.
#---------------------------------------------------------------------------
def testPolygonalMeshOppZones(self):
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax)
answer = [[] for i in xrange(mesh.numFaces)]
for izone in xrange(mesh.numZones):
faces = mesh.zone(izone).faceIDs
for iface in faces:
answer[mesh.positiveID(iface)].append(izone)
for iface in xrange(mesh.numFaces):
face = mesh.face(iface)
zoneIDs = answer[iface]
assert len(zoneIDs) in (1, 2)
if len(zoneIDs) == 2:
self.failUnless(mesh.positiveID(face.oppositeZoneID(zoneIDs[0])) == zoneIDs[1],
"Bad opposites: (%i, %i) != (%i, %i)" %
(zoneIDs[0], zoneIDs[1],
face.oppositeZoneID(zoneIDs[0]), face.oppositeZoneID(zoneIDs[1])))
self.failUnless(mesh.positiveID(face.oppositeZoneID(zoneIDs[1])) == zoneIDs[0],
"Bad opposites: (%i, %i) != (%i, %i)" %
(zoneIDs[0], zoneIDs[1],
face.oppositeZoneID(zoneIDs[0]), face.oppositeZoneID(zoneIDs[1])))
else:
assert PolygonalMesh.positiveID(face.oppositeZoneID(zoneIDs[0])) == PolygonalMesh.UNSETID
#---------------------------------------------------------------------------
# Test the global mesh node IDs.
#---------------------------------------------------------------------------
def testGlobalMeshNodeIDs(self):
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax,
generateParallelConnectivity = True)
globalIDs = mesh.globalMeshNodeIDs()
# Check that all our local IDs are unique.
uniqueIDs = set()
for i in globalIDs:
uniqueIDs.add(i)
self.failUnless(len(uniqueIDs) == len(globalIDs),
"Global mesh node IDs not unique! %i != %i" % (len(globalIDs), len(uniqueIDs)))
# Check that the IDs are unique and consistent across domains.
if mpi.procs > 1:
neighbors = mesh.neighborDomains
sharedNodes = mesh.sharedNodes
assert len(neighbors) == len(sharedNodes)
# Translate to the shared nodes to global IDs.
sharedGlobalIDs = [[globalIDs[i] for i in localIDs] for localIDs in sharedNodes]
assert len(sharedGlobalIDs) == len(neighbors)
# Do non-blocking sends to all our neighbors.
sendRequests = []
for neighbor, ids in zip(neighbors, sharedGlobalIDs):
sendRequests.append(mpi.isend(ids, dest=neighbor))
assert len(sendRequests) == len(neighbors)
# Recv the IDs from our neighbors and do the testing.
for neighbor, localIDs in zip(neighbors, sharedGlobalIDs):
otherIDs = mpi.recv(source=neighbor)[0]
self.failUnless(otherIDs == list(localIDs),
"Global IDs don't match between domains %i <-> %i\n%s\n%s" % (mpi.rank, neighbor, list(localIDs), otherIDs))
# Wait until all our sends have completed.
for req in sendRequests:
req.Wait()
#---------------------------------------------------------------------------
# Test the bounding surface.
#---------------------------------------------------------------------------
def testBoundingSurface(self):
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax,
generateVoid = False,
generateParallelConnectivity = True)
bs = mesh.boundingSurface()
# f = open("surface.gnu", "w")
# f.write(str(bs))
# f.close()
# Check that all the generators are contained.
pos = self.nodes.positions()
for i in xrange(self.nodes.numInternalNodes):
self.failUnless(bs.contains(pos[i]),
"Failed containment for generator %i @ %s" % (i, pos[i]))
# Check that all mesh nodes are contained.
for i in xrange(mesh.numNodes):
self.failUnless(bs.contains(mesh.node(i).position()),
"Failed containment for mesh node %i @ %s" % (i, mesh.node(i).position()))
return
#===============================================================================
# Create a uniformly spaced nodes/mesh.
#===============================================================================
class UniformPolygonalMeshTests(unittest.TestCase, PolygonalMeshGenericTests):
#---------------------------------------------------------------------------
# Create the NodeList we'll use for generating the mesh.
#---------------------------------------------------------------------------
def setUp(self):
global itest
eos = GammaLawGasMKS(5.0/3.0, 1.0)
self.nodes = makeFluidNodeList("test nodes %i" % itest, eos,
numInternal = nperdomain,
nPerh = 2.01,
hmin = 1e-5,
hmax = 0.3)
itest += 1
self.pos = self.nodes.positions()
self.H = self.nodes.Hfield()
# Generate positions and split them up between domains appropriately.
dxproc = (x1 - x0)/nxproc
dyproc = (y1 - y0)/nxproc
ixproc = rank % nxproc
iyproc = rank / nxproc
xminproc = Vector(x0 + ixproc*dxproc, y0 + iyproc*dyproc)
xmaxproc = Vector(x0 + (ixproc + 1)*dxproc, y0 + (iyproc + 1)*dyproc)
dxavg = (x1 - x0)/nx
dyavg = (y1 - y0)/ny
self.dxmin = dxavg
xynodes_all = [Vector(x0 + (i % nx + 0.5)*dxavg, y0 + (i / nx + 0.5)*dyavg) for i in xrange(n)]
xynodes = [v for v in xynodes_all if testPointInBox(v, xminproc, xmaxproc)]
assert len(xynodes) == nperdomain
assert mpi.allreduce(len(xynodes), mpi.SUM) == n
# We now have the positions for each domain appropriately divided, so shuffle
# the local positions.
random.shuffle(xynodes)
# Now we can set the node conditions.
for i in xrange(nperdomain):
self.pos[i] = xynodes[i]
self.H[i] = SymTensor(1.0/(2.0*dxavg), 0.0,
0.0, 1.0/(2.0*dyavg))
self.nodes.neighbor().updateNodes()
# Fix up the H's.
#iterateThoseHs(self.nodes)
return
#---------------------------------------------------------------------------
# Standard destructor.
#---------------------------------------------------------------------------
def tearDown(self):
del self.nodes
return
#===============================================================================
# Create randomly spaced set of nodes in the unit square.
#===============================================================================
class RandomPolygonalMeshTests(unittest.TestCase, PolygonalMeshGenericTests):
#---------------------------------------------------------------------------
# Create the NodeList we'll use for generating the mesh.
#---------------------------------------------------------------------------
def setUp(self):
global itest
eos = GammaLawGasMKS(5.0/3.0, 1.0)
self.nodes = makeFluidNodeList("test nodes %i" % itest, eos,
numInternal = nperdomain,
nPerh = 2.01,
hmin = 1.0e-5,
hmax = 0.3)
itest += 1
self.pos = self.nodes.positions()
self.H = self.nodes.Hfield()
# Figure out the domain bounding volumes.
dxproc = (x1 - x0)/nxproc
dyproc = (y1 - y0)/nxproc
ixproc = rank % nxproc
iyproc = rank / nxproc
xminproc = Vector(x0 + ixproc*dxproc, y0 + iyproc*dyproc)
xmaxproc = Vector(x0 + (ixproc + 1)*dxproc, y0 + (iyproc + 1)*dyproc)
# Randomly seed the generators. We choose from random cells in order
# to keep nodes from getting too close together.
xynodes_all = []
occupiedCells = set()
for k in xrange(n):
            i = rangen.randint(0, ncell - 1)
            while i in occupiedCells:
                i = rangen.randint(0, ncell - 1)
ix = i % nxcell
iy = i / nxcell
xynodes_all.append(Vector((ix + 0.5)*dxcell, (iy + 0.5)*dycell))
occupiedCells.add(i)
assert len(occupiedCells) == n
xynodes_all = mpi.bcast(xynodes_all)
xynodes = [v for v in xynodes_all if testPointInBox(v, xminproc, xmaxproc)]
dxavg = (x1 - x0)/nx
dyavg = (y1 - y0)/ny
self.dxmin = dxavg
assert mpi.allreduce(len(xynodes), mpi.SUM) == n
# Now we can set the node conditions.
self.nodes.numInternalNodes = len(xynodes)
for i in xrange(len(xynodes)):
self.pos[i] = xynodes[i]
self.H[i] = SymTensor(1.0/(2.0*dxavg), 0.0,
0.0, 1.0/(2.0*dyavg))
self.nodes.neighbor().updateNodes()
# Fix up the H's.
iterateThoseHs(self.nodes)
return
#---------------------------------------------------------------------------
# Standard destructor.
#---------------------------------------------------------------------------
def tearDown(self):
del self.nodes
return
#===============================================================================
# Run the tests
#===============================================================================
if __name__ == "__main__":
unittest.main()
```
#### File: unit/Mesh/testSharedElements.py
```python
import mpi
from Spheral2d import *
#----------------------------------------------------------------------
# Test that the shared nodes are consistent between domains.
#----------------------------------------------------------------------
def testSharedNodes(mesh):
assert len(mesh.neighborDomains) == len(mesh.sharedNodes)
# First check that everyone agrees about who is talking to who.
myNeighborDomains = list(mesh.neighborDomains)
for sendProc in xrange(mpi.procs):
otherProcs = mpi.bcast(myNeighborDomains, root=sendProc)
if mpi.rank != sendProc:
assert (mpi.rank in otherProcs) == (sendProc in mesh.neighborDomains)
# Build our set of global shared node IDs.
globalIDs = mesh.globalMeshNodeIDs()
globalSharedNodes = [[globalIDs[i] for i in localNodes] for localNodes in mesh.sharedNodes]
assert len(globalSharedNodes) == len(mesh.neighborDomains)
# Check that the shared nodes are consistent.
sendRequests = []
for (otherProc, ids) in zip(mesh.neighborDomains, globalSharedNodes):
sendRequests.append(mpi.isend(ids, dest=otherProc))
for (otherProc, ids) in zip(mesh.neighborDomains, globalSharedNodes):
otherIDs = mpi.recv(source=otherProc)[0]
assert ids == otherIDs
# Check that all shared nodes have been found.
localSharedNodes = [[i for i in localNodes] for localNodes in mesh.sharedNodes]
positions = vector_of_Vector()
for i in xrange(mesh.numNodes):
positions.append(mesh.node(i).position())
xmin, xmax = Vector(), Vector()
boundingBox(positions, xmin, xmax)
xmin = Vector(mpi.allreduce(xmin.x, mpi.MIN), mpi.allreduce(xmin.y, mpi.MIN))
xmax = Vector(mpi.allreduce(xmax.x, mpi.MAX), mpi.allreduce(xmax.y, mpi.MAX))
boxInv = Vector(1.0/(xmax.x - xmin.x),
1.0/(xmax.y - xmin.y))
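    # Quantize positions onto the global bounding box and hash them: nodes
    # shared between domains land on identical hashes.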
nodeHashes = [hashPosition(mesh.node(i).position(), xmin, xmax, boxInv) for i in xrange(mesh.numNodes)]
nodeHashes2ID = {}
for i in xrange(len(nodeHashes)):
nodeHashes2ID[nodeHashes[i]] = i
for sendProc in xrange(mpi.procs):
otherNodeHashes = mpi.bcast(nodeHashes, root=sendProc)
if sendProc != mpi.rank:
for hashi in otherNodeHashes:
if hashi in nodeHashes:
assert sendProc in myNeighborDomains
idomain = myNeighborDomains.index(sendProc)
i = nodeHashes2ID[hashi]
assert i in localSharedNodes[idomain]
# Same for faces.
localSharedFaces = [[i for i in localFaces] for localFaces in mesh.sharedFaces]
positions = vector_of_Vector()
for i in xrange(mesh.numFaces):
positions.append(mesh.face(i).position())
faceHashes = [hashPosition(mesh.face(i).position(), xmin, xmax, boxInv) for i in xrange(mesh.numFaces)]
faceHashes2ID = {}
for i in xrange(len(faceHashes)):
faceHashes2ID[faceHashes[i]] = i
for sendProc in xrange(mpi.procs):
otherFaceHashes = mpi.bcast(faceHashes, root=sendProc)
if sendProc != mpi.rank:
for hashi in otherFaceHashes:
if hashi in faceHashes:
assert sendProc in myNeighborDomains
idomain = myNeighborDomains.index(sendProc)
i = faceHashes2ID[hashi]
assert i in localSharedFaces[idomain]
return True
```
#### File: unit/Mesh/testWritePolygonalMesh.py
```python
from math import *
import unittest
import shutil, os
import random
from Spheral2d import *
from generateMesh import *
from siloMeshDump import siloMeshDump
from SpheralTestUtilities import fuzzyEqual
#===============================================================================
# Load mpi, and figure out how many domains to set up, and which domain we are.
#===============================================================================
import mpi
rank = mpi.rank
numDomains = mpi.procs
nxproc = int(sqrt(numDomains))
assert nxproc*nxproc == numDomains
#===============================================================================
# Create a global random number generator.
#===============================================================================
import random
rangen = random.Random()
#===============================================================================
# Return a random string to help make test files unique.
#===============================================================================
def randomString():
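    # Shuffle the digits 0..19 into a string on each processor, then broadcast
    # rank 0's result so every domain agrees on the file suffix.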
l = range(20)
random.shuffle(l)
result = ""
for x in l:
result += str(x)
result = mpi.bcast(result, 0)
return result
#===============================================================================
# Some boundary conditions.
#===============================================================================
x0, x1 = 0.0, 1.0
y0, y1 = 0.0, 1.0
nx, ny = 16*nxproc, 16*nxproc
n = nx*ny
nperdomain = n / numDomains
nxcell = KeyTraits.maxKey1d/4
nycell = nxcell
assert nx < nxcell
ncell = nxcell*nycell
dxcell = (x1 - x0)/nxcell
dycell = (y1 - y0)/nycell
dxhash = (x1 - x0)/(KeyTraits.maxKey1d - KeyTraits.two)
xmin = Vector(x0, y0)
xmax = Vector(x1, y1)
xbc0 = ReflectingBoundary(Plane(Vector(x0, y0), Vector(1.0, 0.0)))
ybc0 = ReflectingBoundary(Plane(Vector(x0, y0), Vector(0.0, 1.0)))
bclist = [xbc0, ybc0]
if numDomains > 1:
bclist.append(BoundingVolumeDistributedBoundary.instance())
bclist = []
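# NOTE: bclist is reset to empty here, so the boundary conditions constructed
# above are not actually applied in these tests.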
#===============================================================================
# Iterate the H's to something reasonable.
#===============================================================================
def iterateThoseHs(nodes):
db = DataBase()
db.appendNodeList(nodes)
for bc in bclist:
bc.setAllGhostNodes(db)
nodes.neighbor().updateNodes()
for bc in bclist:
bc.finalizeGhostBoundary()
nodes.neighbor().updateNodes()
vecbound = vector_of_Boundary()
for bc in bclist:
vecbound.append(bc)
WT = TableKernel(BSplineKernel(), 1000)
smooth = SPHSmoothingScale()
iterateIdealH(db, vecbound, WT, smooth,
tolerance = 1.0e-4)
return
#===============================================================================
# A counter to help in creating unique NodeList names.
#===============================================================================
itest = 0
#===============================================================================
# Test class for tests to apply to all meshes.
#===============================================================================
class PolygonalMeshSiloGenericTests:
#---------------------------------------------------------------------------
# Test writing the mesh to a silo file.
#---------------------------------------------------------------------------
def testPolygonalMeshWriteSilo(self):
mesh, void = generatePolygonalMesh([self.nodes],
xmin = xmin,
xmax = xmax,
generateVoid = False,
generateParallelConnectivity = True)
siloMeshDump("silo_testPolygonalMeshWriteSilo_%s" % self.testext,
mesh,
label = "Test dumping a polygonal mesh",
nodeLists = [self.nodes])
return
#===============================================================================
# Create a uniformly spaced nodes/mesh.
#===============================================================================
class UniformPolygonalMeshTests(unittest.TestCase, PolygonalMeshSiloGenericTests):
#---------------------------------------------------------------------------
# Create the NodeList we'll use for generating the mesh.
#---------------------------------------------------------------------------
def setUp(self):
global itest
self.testext = "Uniform%s_%idomain" % (randomString(), mpi.procs)
eos = GammaLawGasMKS(5.0/3.0, 1.0)
self.nodes = makeFluidNodeList("test nodes %i" % itest, eos,
numInternal = nperdomain,
nPerh = 2.01,
hmin = 1e-5,
hmax = 0.3)
itest += 1
self.pos = self.nodes.positions()
self.H = self.nodes.Hfield()
# Generate positions and split them up between domains appropriately.
dxproc = (x1 - x0)/nxproc
dyproc = (y1 - y0)/nxproc
ixproc = rank % nxproc
iyproc = rank / nxproc
xminproc = Vector(x0 + ixproc*dxproc, y0 + iyproc*dyproc)
xmaxproc = Vector(x0 + (ixproc + 1)*dxproc, y0 + (iyproc + 1)*dyproc)
dxavg = (x1 - x0)/nx
dyavg = (y1 - y0)/ny
self.dxmin = dxavg
xynodes_all = [Vector(x0 + (i % nx + 0.5)*dxavg, y0 + (i / nx + 0.5)*dyavg) for i in xrange(n)]
xynodes = [v for v in xynodes_all if testPointInBox(v, xminproc, xmaxproc)]
assert len(xynodes) == nperdomain
assert mpi.allreduce(len(xynodes), mpi.SUM) == n
# We now have the positions for each domain appropriately divided, so shuffle
# the local positions.
random.shuffle(xynodes)
# Now we can set the node conditions.
for i in xrange(nperdomain):
self.pos[i] = xynodes[i]
self.H[i] = SymTensor(1.0/(2.0*dxavg), 0.0,
0.0, 1.0/(2.0*dyavg))
self.nodes.neighbor().updateNodes()
# Fix up the H's.
#iterateThoseHs(self.nodes)
return
#---------------------------------------------------------------------------
# Standard destructor.
#---------------------------------------------------------------------------
def tearDown(self):
del self.nodes
if mpi.rank == 0:
os.remove("silo_testPolygonalMeshWriteSilo_%s.silo" % self.testext)
shutil.rmtree("silo_testPolygonalMeshWriteSilo_%s" % self.testext, ignore_errors=True)
return
#===============================================================================
# Create randomly spaced set of nodes in the unit square.
#===============================================================================
class RandomPolygonalMeshTests(unittest.TestCase, PolygonalMeshSiloGenericTests):
#---------------------------------------------------------------------------
# Create the NodeList we'll use for generating the mesh.
#---------------------------------------------------------------------------
def setUp(self):
global itest
self.testext = "Random%s_%idomain" % (randomString(), mpi.procs)
eos = GammaLawGasMKS(5.0/3.0, 1.0)
self.nodes = makeFluidNodeList("test nodes %i" % itest, eos,
numInternal = nperdomain,
nPerh = 2.01,
hmin = 1.0e-5,
hmax = 0.3)
itest += 1
self.pos = self.nodes.positions()
self.H = self.nodes.Hfield()
# Figure out the domain bounding volumes.
dxproc = (x1 - x0)/nxproc
dyproc = (y1 - y0)/nxproc
ixproc = rank % nxproc
iyproc = rank / nxproc
xminproc = Vector(x0 + ixproc*dxproc, y0 + iyproc*dyproc)
xmaxproc = Vector(x0 + (ixproc + 1)*dxproc, y0 + (iyproc + 1)*dyproc)
# Randomly seed the generators. We choose from random cells in order
# to keep nodes from getting too close together.
xynodes_all = []
occupiedCells = set()
for k in xrange(n):
i = rangen.randint(0, ncell)
while i in occupiedCells:
i = rangen.randint(0, ncell)
ix = i % nxcell
iy = i / nxcell
xynodes_all.append(Vector((ix + 0.5)*dxcell, (iy + 0.5)*dycell))
occupiedCells.add(i)
assert len(occupiedCells) == n
xynodes_all = mpi.bcast(xynodes_all)
xynodes = [v for v in xynodes_all if testPointInBox(v, xminproc, xmaxproc)]
dxavg = (x1 - x0)/nx
dyavg = (y1 - y0)/ny
self.dxmin = dxavg
assert mpi.allreduce(len(xynodes), mpi.SUM) == n
# Now we can set the node conditions.
self.nodes.numInternalNodes = len(xynodes)
for i in xrange(len(xynodes)):
self.pos[i] = xynodes[i]
self.H[i] = SymTensor(1.0/(2.0*dxavg), 0.0,
0.0, 1.0/(2.0*dyavg))
self.nodes.neighbor().updateNodes()
# Fix up the H's.
iterateThoseHs(self.nodes)
return
#---------------------------------------------------------------------------
# Standard destructor.
#---------------------------------------------------------------------------
def tearDown(self):
del self.nodes
if mpi.rank == 0:
os.remove("silo_testPolygonalMeshWriteSilo_%s.silo" % self.testext)
shutil.rmtree("silo_testPolygonalMeshWriteSilo_%s" % self.testext, ignore_errors=True)
return
#===============================================================================
# Run the tests
#===============================================================================
if __name__ == "__main__":
unittest.main()
```
#### File: unit/NodeList/Generate2dTestSetup.py
```python
from math import *
from Spheral import *
import random
class Generate2dTestSetup:
#===========================================================================
# Create a set of NodeLists and a DataBase for use in the tests.
#===========================================================================
def __init__(self,
asph = False,
seed = 'random',
nodesPerh = 2.01,
n1 = 1000,
n2 = 2500,
n3 = 500,
rmin1 = Vector2d(0.0, 0.0),
rmax1 = Vector2d(1.0, 1.0),
rmin2 = Vector2d(1.0, 0.0),
rmax2 = Vector2d(1.5, 1.0),
rmin3 = Vector2d(1.5, 0.0),
rmax3 = Vector2d(2.0, 1.0)):
self.n1 = n1
self.n2 = n2
self.n3 = n3
self.g = random.Random()
self.cache = []
neighborSearchType = Neighbor2d.NeighborSearchType.GatherScatter
numGridLevels = 10
topGridCellSize = 8.0
origin = Vector2d(0.0, 0.0)
kernelExtent = 2.0
hmax = 0.5
vol1 = (rmax1.x - rmin1.x)*(rmax1.y - rmin1.y)
vol2 = (rmax2.x - rmin2.x)*(rmax2.y - rmin2.y)
vol3 = (rmax3.x - rmin3.x)*(rmax3.y - rmin3.y)
rho1 = 1.0
rho2 = 1.0
rho3 = 1.0
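        # node mass = rho*vol/n, written as n/(n*n + 1e-30) to avoid dividing
        # by zero when a NodeList is created empty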
m1 = vol1*rho1 * n1/(n1*n1 + 1e-30)
m2 = vol2*rho2 * n2/(n2*n2 + 1e-30)
m3 = vol3*rho3 * n3/(n3*n3 + 1e-30)
self.eos = GammaLawGasMKS2d(5.0/3.0, 1.0)
self.WT = TableKernel2d(BSplineKernel2d())
# Construct the NodeLists to be distributed
self.dataBase = DataBase2d()
self.dataBase.updateConnectivityMap()
if asph:
self.nodes1 = AsphNodeList2d("nodes 1", self.eos, self.WT, self.WT)
self.nodes2 = AsphNodeList2d("nodes 2", self.eos, self.WT, self.WT)
self.nodes3 = AsphNodeList2d("nodes 3", self.eos, self.WT, self.WT)
else:
self.nodes1 = SphNodeList2d("nodes 1", self.eos, self.WT, self.WT)
self.nodes2 = SphNodeList2d("nodes 2", self.eos, self.WT, self.WT)
self.nodes3 = SphNodeList2d("nodes 3", self.eos, self.WT, self.WT)
for nodes, n, rmin, rmax, m, rho in ((self.nodes1, n1, rmin1, rmax1, m1, rho1),
(self.nodes2, n2, rmin2, rmax2, m2, rho2),
(self.nodes3, n3, rmin3, rmax3, m3, rho3)):
if seed == 'random':
xyNodes = self.randomDistribute(n, rmin, rmax)
elif seed == 'lattice':
xyNodes = self.latticeDistribute(n, rmin, rmax)
nodes.numInternalNodes = n
nodes.nodesPerSmoothingScale = nodesPerh
nodes.hmax = hmax
Hi = self.determineH(n, rmin, rmax, nodesPerh)
nodes.mass(ScalarField2d("tmp", nodes, m))
nodes.massDensity(ScalarField2d("tmp", nodes, rho))
nodes.Hfield(SymTensorField2d("tmp", nodes, Hi))
nodes.updateWeight(self.dataBase.connectivityMap())
for i in xrange(n):
nodes.positions()[i] = xyNodes[i]
neighbor = NestedGridNeighbor2d(nodes,
neighborSearchType,
numGridLevels,
topGridCellSize,
origin,
kernelExtent)
nodes.registerNeighbor(neighbor)
self.cache.append(neighbor)
self.dataBase.appendNodeList(nodes)
return
#===========================================================================
# Calculate one over the smoothing scale for the given number of nodes and
# volume.
#===========================================================================
def determineH(self, nGlobal, rmin, rmax,
nNodesPerh = 2.01):
if nGlobal > 0:
vol = (rmax.y - rmin.y) * (rmax.x - rmin.x)
assert vol > 0.0
dV = vol/nGlobal
dx = sqrt(dV)
hi = 1.0/(nNodesPerh*dx)
Hi = SymTensor2d(hi, 0.0,
0.0, hi)
return Hi
else:
return SymTensor2d()
#===========================================================================
# Distribute nodes randomly in the given volume.
#===========================================================================
def randomDistribute(self,
nNodesGlobal, # global number of nodes in this nodelist
rmin, rmax): # total simulation volume
nodePositions = []
for globalNodeID in xrange(nNodesGlobal):
nodePositions.append(Vector2d(self.g.uniform(rmin.x, rmax.x),
self.g.uniform(rmin.y, rmax.y)))
assert len(nodePositions) == nNodesGlobal
return nodePositions
#===========================================================================
# Distribute nodes on a lattice in the given volume.
#===========================================================================
def latticeDistribute(self,
n, # global number of nodes in this nodelist
rmin, rmax): # total simulation volume
nodePositions = []
if n > 0:
nx = int(sqrt(n) + 1e-5)
assert nx*nx == n
dx = (rmax.x - rmin.x)/nx
dy = (rmax.y - rmin.y)/nx
for ix in xrange(nx):
for iy in xrange(nx):
nodePositions.append(Vector2d((ix + 0.5)*dx, (iy + 0.5)*dy))
assert len(nodePositions) == n
return nodePositions
```
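A minimal usage sketch for the helper above (assuming the Spheral 2-D Python
bindings it imports are built; the argument values are illustrative):
```python
from Generate2dTestSetup import Generate2dTestSetup

# Build three lattice-seeded NodeLists and grab the shared DataBase. Each n
# must be a perfect square to satisfy latticeDistribute's nx*nx == n assert.
setup = Generate2dTestSetup(asph=False, seed='lattice', n1=100, n2=100, n3=100)
db = setup.dataBase
print([nl.numInternalNodes for nl in (setup.nodes1, setup.nodes2, setup.nodes3)])
```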
#### File: unit/SolidMaterial/testTillotsonEquationOfState.py
```python
import unittest
from math import *
from SpheralTestUtilities import fuzzyEqual
from SolidSpheral1d import *
#===============================================================================
# Unit tests.
#===============================================================================
class TestTillotsonEquationOfState(unittest.TestCase):
#===========================================================================
# setUp
#===========================================================================
def setUp(self):
self.nsample = 100
self.Ptol = 1.0e-10
self.logRhoMin = log(1e-5)
self.logRhoMax = log(1e5)
self.drho = (self.logRhoMax - self.logRhoMin)/self.nsample
self.logEpsMin = log(1e-10)
self.logEpsMax = log(1e10)
self.deps = (self.logEpsMax - self.logEpsMin)/self.nsample
# I largely lift these parameters from Saito et al. (2008) for
# the Asteroid dolomite material, converted here for MKS units.
self.rho0 = 3.0e3
self.etamin, self.etamax = 1e-10, 1e10
self.a = 0.5
self.b = 0.6
self.A = 91.1e9
self.B = 32.1e9
self.alpha = 5.0
self.beta = 5.0
self.eps0 = 10.0e6
self.epsLiquid = 250.0e6
self.epsVapor = 1.4e9
self.atomicWeight = 20.12 # Taken from Murty 1962 for Limestone.
self.eos = TillotsonEquationOfStateMKS(self.rho0,
self.etamin,
self.etamax,
self.etamin,
self.etamax,
self.a,
self.b,
self.A,
self.B,
self.alpha,
self.beta,
self.eps0,
self.epsLiquid,
self.epsVapor,
self.atomicWeight)
self.nodes = makeFluidNodeList("test nodes", self.eos, numInternal=1)
#===========================================================================
# tearDown
#===========================================================================
def tearDown(self):
del self.nodes, self.eos
#===========================================================================
# Pressure analytic answer.
#===========================================================================
def P1(self, rhoi, epsi, etai, mui):
return (self.a + self.b/(1.0 + epsi/(self.eps0*etai*etai)))*rhoi*epsi + self.A*mui + self.B*mui*mui
def P2(self, rhoi, epsi, etai, mui):
return (self.a + self.b/(1.0 + epsi/(self.eps0*etai*etai)))*rhoi*epsi + self.A*mui
def P3(self, rhoi, epsi, etai, mui):
p2 = self.P2(rhoi, self.epsLiquid, etai, mui)
p4 = self.P4(rhoi, self.epsVapor, etai, mui)
return p2 + (p4 - p2)*(epsi - self.epsLiquid)/(self.epsVapor - self.epsLiquid)
def P4(self, rhoi, epsi, etai, mui):
return (self.a*rhoi*epsi +
(self.b*rhoi*epsi/(1.0 + epsi/(self.eps0*etai*etai)) +
self.A*mui*exp(self.beta*(1.0 - 1.0/etai)))*exp(-self.alpha*(1.0 - 1.0/etai)**2))
def Pans(self, rhoi, epsi):
etai = self.eos.boundedEta(rhoi)
mui = etai - 1.0
rho = etai*self.rho0
        phi = self.b/(1.0 + epsi/(self.eps0*etai*etai))
        chi = 1.0/etai - 1.0
if mui >= 0.0:
return (self.a + phi)*rho*epsi + self.A*mui + self.B*mui*mui
elif epsi <= self.epsLiquid:
if etai > self.etamin:
return (self.a + phi)*rho*epsi + self.A*mui + self.B*mui*mui
else:
return 0.0
elif epsi >= self.epsVapor:
return self.a*rho*epsi + (phi*rho*epsi + self.A*mui*exp(-self.beta*chi))*exp(-self.alpha*chi*chi)
else:
if etai > self.etamin:
P2 = (self.a + phi)*rho*epsi + self.A*mui + self.B*mui*mui
else:
P2 = 0.0
P4 = self.a*rho*epsi + (phi*rho*epsi + self.A*mui*exp(-self.beta*chi))*exp(-self.alpha*chi*chi)
return P2 + (P4 - P2)*(epsi - self.epsLiquid)/(self.epsVapor - self.epsLiquid)
#===========================================================================
# dPdrho analytic answer.
#===========================================================================
def dPdrho1(self, rhoi, epsi, etai, mui):
return (self.dPdrho2(rhoi, epsi, etai, mui) +
2.0*self.B*(rhoi - self.rho0)/(self.rho0**2))
def dPdrho2(self, rhoi, epsi, etai, mui):
return (self.a*epsi +
self.A/self.rho0 +
2.0*self.b*epsi**2*self.eps0*(rhoi*self.rho0)**2/(self.eps0*rhoi**2 + epsi*self.rho0**2)**2 +
self.b*epsi*self.eps0*rhoi**2/(self.eps0*rhoi**2 + epsi*self.rho0**2))
    def dPdrho3(self, rhoi, epsi, etai, mui):
        # note: the original referenced undefined helpers (dP2drho/dP4drho)
        # and a misspelled local; use the dPdrho2/dPdrho4 methods defined here
        dp2drho = self.dPdrho2(rhoi, epsi, etai, mui)
        dp4drho = self.dPdrho4(rhoi, epsi, etai, mui)
        return dp2drho + (dp4drho - dp2drho)*(epsi - self.epsLiquid)/(self.epsVapor - self.epsLiquid)
    def dPdrho4(self, rhoi, epsi, etai, mui):
        # fixed two undefined names from the original: self.rho -> rhoi and
        # eps -> epsi
        return (self.a*epsi -
                (2*self.alpha*exp(-self.alpha*((rhoi - self.rho0)/rhoi)**2) * self.rho0*(1.0 - 1.0/etai) *
                 (self.A*exp(self.beta*(1.0 - 1.0/etai))*(1.0/etai - 1.0) +
                  self.b*epsi*self.eps0*rhoi**3/(self.eps0*rhoi**2 + epsi*self.rho0**2)))/rhoi**2 +
                exp(-self.alpha*((rhoi - self.rho0)/rhoi)**2)*
                (self.A*exp(self.beta*(1.0 - 1.0/etai))*(rhoi**2 + self.beta*rhoi*self.rho0 - self.beta*self.rho0**2)/
                 (rhoi**2*self.rho0) +
                 self.b*epsi*self.eps0*rhoi**2*(self.eps0*rhoi**2 + 3.0*epsi*self.rho0**2)/
                 (self.eps0*rhoi**2 + epsi*self.rho0**2)**2))
    def dPdrhoans(self, rhoi, epsi):
        # note: this previously returned the pressure branches (P1-P4) rather
        # than their density derivatives; route to the dPdrho helpers instead
        etai = self.eos.boundedEta(rhoi)
        mui = etai - 1.0
        if mui >= 0.0:
            print "Region 1"
            return self.dPdrho1(rhoi, epsi, etai, mui)
        elif epsi <= self.epsLiquid:
            print "Region 2"
            return self.dPdrho2(rhoi, epsi, etai, mui)
        elif epsi <= self.epsVapor:
            print "Region 3"
            return self.dPdrho3(rhoi, epsi, etai, mui)
        else:
            print "Region 4"
            return self.dPdrho4(rhoi, epsi, etai, mui)
#===========================================================================
# a
#===========================================================================
def testa(self):
assert self.eos.a == self.a
#===========================================================================
# b
#===========================================================================
def testb(self):
assert self.eos.b == self.b
#===========================================================================
# A
#===========================================================================
def testA(self):
assert self.eos.A == self.A
#===========================================================================
# B
#===========================================================================
def testB(self):
assert self.eos.B == self.B
#===========================================================================
# alpha
#===========================================================================
def testalpha(self):
assert self.eos.alpha == self.alpha
#===========================================================================
# beta
#===========================================================================
def testbeta(self):
assert self.eos.beta == self.beta
#===========================================================================
# eps0
#===========================================================================
def testeps0(self):
assert self.eos.eps0 == self.eps0
#===========================================================================
# epsLiquid
#===========================================================================
def testepsLiquid(self):
assert self.eos.epsLiquid == self.epsLiquid
#===========================================================================
# epsVapor
#===========================================================================
def testepsVapor(self):
assert self.eos.epsVapor == self.epsVapor
#===========================================================================
# atomicWeight
#===========================================================================
def testatomicWeight(self):
assert self.eos.atomicWeight == self.atomicWeight
#===========================================================================
# rho
#===========================================================================
def rho(self, i):
return exp(self.logRhoMin + (i + 0.5)*self.drho)
#===========================================================================
# eps
#===========================================================================
def eps(self, i):
return exp(self.logEpsMin + (i + 0.5)*self.deps)
#===========================================================================
# pressure
#===========================================================================
def testPressure(self):
rhof = ScalarField("rho", self.nodes)
epsf = ScalarField("eps", self.nodes)
Pf = ScalarField("pressure", self.nodes)
for irho in xrange(self.nsample):
rhof[0] = self.rho(irho)
for ieps in xrange(self.nsample):
epsf[0] = self.eps(ieps)
self.eos.setPressure(Pf, rhof, epsf)
Pi = Pf[0]
P0 = self.Pans(rhof[0], epsf[0])
eta = self.eos.boundedEta(rhof[0])
mu = eta - 1.0
phi = self.eos.computePhi(eta, epsf[0])
P2 = self.eos.computeP2(phi, mu, rhof[0], epsf[0])
self.failUnless(fuzzyEqual(Pi, P0, self.Ptol),
"Pressure do not match: P(%g, %g) = %g != %g\n P1=(%g,%g) P2=(%g,%g), P4=(%g,%g)\n eta=%g mu=%g phi=%g" %
(rhof[0], epsf[0], Pi, P0,
self.eos.computeP1(mu, P2), self.P1(rhof[0], epsf[0], eta, mu),
P2, self.P2(rhof[0], epsf[0], eta, mu),
self.eos.computeP4(phi, mu, eta, rhof[0], epsf[0]), self.P4(rhof[0], epsf[0], eta, mu),
eta, mu, phi))
return
#===========================================================================
# dPdrho
#===========================================================================
# def testdPdrho(self):
# for irho in xrange(self.nsample):
# rhoi = self.rho(irho)
# for ieps in xrange(self.nsample):
# epsi = self.eps(ieps)
# dPdrhoi = self.eos.computeDPDrho(rhoi, epsi)
# dPdrho0 = self.dPdrhoans(rhoi, epsi)
# self.failUnless(fuzzyEqual(dPdrhoi, dPdrho0, self.Ptol),
# "dP/drho does not match: dP/drho(%g, %g) = %g != %g" % (rhoi, epsi, dPdrhoi, dPdrho0))
# return
#===============================================================================
# Run the suckers.
#===============================================================================
if __name__ == "__main__":
unittest.main()
```
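For reference, the branch selection coded in `Pans` above is the four-region
Tillotson form. Writing eta = rho/rho0, mu = eta - 1,
phi = b/(1 + eps/(eps0*eta**2)), and chi = 1/eta - 1 (and ignoring the etamin
floor that clamps P2 to zero), the pressure the test checks against is:
```latex
P(\rho,\epsilon) =
\begin{cases}
(a+\phi)\,\rho\epsilon + A\mu + B\mu^{2}, & \mu \ge 0 \quad \text{(region 1)}\\
(a+\phi)\,\rho\epsilon + A\mu + B\mu^{2}, & \mu < 0,\ \epsilon \le \epsilon_{\mathrm{liq}} \quad \text{(region 2)}\\
P_{2} + (P_{4}-P_{2})\,\dfrac{\epsilon-\epsilon_{\mathrm{liq}}}{\epsilon_{\mathrm{vap}}-\epsilon_{\mathrm{liq}}}, & \epsilon_{\mathrm{liq}} < \epsilon < \epsilon_{\mathrm{vap}} \quad \text{(region 3)}\\
a\rho\epsilon + \left(\phi\rho\epsilon + A\mu\,e^{-\beta\chi}\right)e^{-\alpha\chi^{2}}, & \epsilon \ge \epsilon_{\mathrm{vap}} \quad \text{(region 4)}
\end{cases}
```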
#### File: unit/Utilities/testDistances3d.py
```python
from Spheral3d import *
from SpheralTestUtilities import *
import unittest
# Create a global random number generator.
import random
rangen = random.Random()
#===============================================================================
# Test our methods for computing distances in 3-D.
#===============================================================================
class TestDistances3d(unittest.TestCase):
#===========================================================================
    # setUp
#===========================================================================
def setUp(self):
self.ntests = 100
self.multMin = 0.001
self.multMax = 1e6
return
#===========================================================================
# Randomly distort two line segments.
#===========================================================================
def randomDistortion(self, a0, a1, b0, b1):
l = rangen.uniform(self.multMin, self.multMax)
T = l*rotationMatrix(Vector(rangen.uniform(0.0, 1.0),
rangen.uniform(0.0, 1.0),
rangen.uniform(0.0, 1.0)).unitVector())
return T*a0, T*a1, T*b0, T*b1, l
#===========================================================================
# Non-overlapping, not parallel
#===========================================================================
def testNonOverlappingNonParallelSegments(self):
a0 = Vector(1.0, 1.0, 1.0)
a1 = Vector(2.0, 2.0, 2.0)
b0 = Vector(2.0, 2.0, 3.0)
b1 = Vector(5.0, 5.0, 3.0)
answer = 1.0
result1, result2 = Vector(), Vector()
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, l = self.randomDistortion(a0, a1, b0, b1)
result = segmentSegmentDistance(aa0, aa1, bb0, bb1)
self.failUnless(fuzzyEqual(result, l*answer),
"Distance error: %g != %g" % (result, l*answer))
#===========================================================================
# Non-overlapping, parallel
#===========================================================================
def testNonOverlappingParallelSegments1(self):
a0 = Vector(1.0, 1.0, 1.0)
a1 = Vector(2.0, 2.0, 2.0)
b0 = a0 + Vector(10.0, 10.0, 1.0)
b1 = a1 + Vector(10.0, 10.0, 1.0)
answer = (b0 - a1).magnitude()
result1, result2 = Vector(), Vector()
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, l = self.randomDistortion(a0, a1, b0, b1)
result = segmentSegmentDistance(aa0, aa1, bb0, bb1)
self.failUnless(fuzzyEqual(result, l*answer),
"Distance error: %g != %g" % (result, l*answer))
#===========================================================================
# Non-overlapping, parallel, but overlapping in a projected sense.
#===========================================================================
def testNonOverlappingParallelSegments2(self):
a0 = Vector(1.0, 1.0, 1.0)
a1 = Vector(2.0, 2.0, 2.0)
b0 = a0 + Vector(0.0, 0.0, 10.0)
b1 = a1 + Vector(0.0, 0.0, 10.0)
answer = (b0 - a1).magnitude()
result1, result2 = Vector(), Vector()
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, l = self.randomDistortion(a0, a1, b0, b1)
result = segmentSegmentDistance(aa0, aa1, bb0, bb1)
self.failUnless(fuzzyEqual(result, l*answer),
"Distance error: %g != %g" % (result, l*answer))
#===========================================================================
# Not parallel, non-intersecting.
#===========================================================================
def testNonOverlappingNonParallelSegments1(self):
a0 = Vector(1.0, 1.0, 1.0)
a1 = Vector(2.0, 2.0, 2.0)
b0 = a0 + Vector(3.0, 3.0, 3.0)
b1 = a1 + Vector(10.0, -10.0, 14.0)
answer = (b0 - a1).magnitude()
result1, result2 = Vector(), Vector()
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, l = self.randomDistortion(a0, a1, b0, b1)
result = segmentSegmentDistance(aa0, aa1, bb0, bb1)
self.failUnless(fuzzyEqual(result, l*answer),
"Distance error: %g != %g" % (result, l*answer))
#===========================================================================
# Not parallel, endpoint of b on middle of a
#===========================================================================
def testNonOverlappingIntersectingSegments1(self):
a0 = Vector(1.0, 1.0, 1.0)
a1 = Vector(2.0, 2.0, 2.0)
b0 = 0.5*(a0 + a1)
b1 = Vector(5.0, 3.0, 2.0)
answer = 0.0
result1, result2 = Vector(), Vector()
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, l = self.randomDistortion(a0, a1, b0, b1)
result = segmentSegmentDistance(aa0, aa1, bb0, bb1)
self.failUnless(fuzzyEqual(result, l*answer),
"Distance error: %g != %g" % (result, l*answer))
#===========================================================================
# Not parallel, intersecting
#===========================================================================
def testNonOverlappingIntersectingSegments2(self):
a0 = Vector(1.0, 1.0, 1.0)
a1 = Vector(2.0, 2.0, 2.0)
b0 = 0.5*(a0 + a1) - Vector(1.0, 1.0, -1.0)
b1 = 0.5*(a0 + a1) + Vector(1.0, 1.0, -1.0)
answer = 0.0
result1, result2 = Vector(), Vector()
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, l = self.randomDistortion(a0, a1, b0, b1)
result = segmentSegmentDistance(aa0, aa1, bb0, bb1)
self.failUnless(fuzzyEqual(result, l*answer),
"Distance error: %g != %g" % (result, l*answer))
if __name__ == "__main__":
unittest.main()
```
#### File: unit/Utilities/testSegmentIntersectPolygonEdges.py
```python
from math import *
from Spheral2d import *
from SpheralTestUtilities import *
import unittest
# Create a global random number generator.
import random
rangen = random.Random()
#===============================================================================
# Test whether a line segment intersects a polygon.
#===============================================================================
class TestLineSegmentPolygonIntersection(unittest.TestCase):
#===========================================================================
# Randomly distort two line segments.
#===========================================================================
def randomDistortion(self, a0, a1, vertices):
l = rangen.uniform(self.multMin, self.multMax)
T = l*rotationMatrix(Vector(rangen.uniform(0.0, 1.0),
rangen.uniform(0.0, 1.0),
rangen.uniform(0.0, 1.0)).unitVector())
verts = vector_of_Vector()
for x in vertices:
verts.append(T*x)
return T*a0, T*a1, Polygon(verts), T
#===========================================================================
# setUp
#===========================================================================
def setUp(self):
self.ntests = 1000
self.multMin = 0.001
self.multMax = 1.0e5
self.vertices = [Vector(1.0, 1.0), Vector(2.0, 1.0),
Vector(2.0, 2.0), Vector(1.0, 2.0)]
return
#===========================================================================
# Segment entirely outside the polygon
#===========================================================================
def testNonintersectingSegment1(self):
a0 = Vector(10.0, 10.0)
a1 = Vector(20.0, 0.0)
for i in xrange(self.ntests):
aa0, aa1, polygon, T = self.randomDistortion(a0, a1, self.vertices)
result = segmentIntersectEdges(aa0, aa1, polygon)
self.failUnless(result == False,
"Incorrectly intersected edge %s->%s with polygon" %
(aa0, aa1))
#===========================================================================
# Segment entirely inside the polygon
#===========================================================================
def testNonintersectingSegment2(self):
a0 = Vector(1.25, 1.25)
a1 = Vector(1.75, 1.75)
for i in xrange(self.ntests):
aa0, aa1, polygon, T = self.randomDistortion(a0, a1, self.vertices)
result = segmentIntersectEdges(aa0, aa1, polygon)
Tinv = T.Inverse()
self.failUnless(result == False,
"Incorrectly intersected edge %s->%s with polygon" %
(Tinv*aa0, Tinv*aa1))
#===========================================================================
# Segment intersecting a random side of the polygon
#===========================================================================
def testSegmentIntersectingRandomEdge1(self):
a0 = Vector(1.5, 1.5)
for i in xrange(self.ntests):
theta = random.uniform(0.0, 2.0*pi)
a1 = a0 + Vector(cos(theta), sin(theta))
aa0, aa1, polygon, T = self.randomDistortion(a0, a1, self.vertices)
result = segmentIntersectEdges(aa0, aa1, polygon)
Tinv = T.Inverse()
self.failUnless(result == True,
"Incorrectly missed intersection of edge %s->%s with polygon" %
(Tinv*aa0, Tinv*aa1))
#===========================================================================
# Interior segment with an endpoint on a random point of the polygon
#===========================================================================
def testSegmentIntersectingRandomEdge2(self):
a0 = Vector(1.5, 1.5)
deltas = []
nverts = len(self.vertices)
for i in xrange(nverts):
j = (i + 1) % nverts
deltas.append(self.vertices[j] - self.vertices[i])
assert len(deltas) == nverts
for i in xrange(self.ntests):
j = random.randint(0, nverts - 1)
a1 = self.vertices[j] + random.random()*deltas[j]
aa0, aa1, polygon, T = self.randomDistortion(a0, a1, self.vertices)
result = segmentIntersectEdges(aa0, aa1, polygon)
Tinv = T.Inverse()
self.failUnless(result == True,
"Incorrectly missed intersection of edge %s->%s with polygon" %
(Tinv*aa0, Tinv*aa1))
if __name__ == "__main__":
unittest.main()
```
#### File: unit/Utilities/testSegmentSegmentIntersection.py
```python
from Spheral2d import *
from SpheralTestUtilities import *
import unittest
# Create a global random number generator.
import random
rangen = random.Random()
#===============================================================================
# Test our various segment-segment intersection scenarios.
#===============================================================================
class TestSegmentSegmentIntersection(unittest.TestCase):
#===========================================================================
# Set up, create arrays of the function values.
#===========================================================================
def setUp(self):
self.ntests = 100
self.multMin = 0.001
self.multMax = 1.0e5
return
#===========================================================================
# Randomly distort two line segments.
#===========================================================================
def randomDistortion(self, a0, a1, b0, b1):
T = (rangen.uniform(self.multMin, self.multMax)*
rotationMatrix(Vector(rangen.uniform(0.0, 1.0),
rangen.uniform(0.0, 1.0)).unitVector()))
return T*a0, T*a1, T*b0, T*b1, T
#===========================================================================
# Non-overlapping
#===========================================================================
def testNonOverlappingSegments(self):
a0 = Vector(1.0, 1.0)
a1 = Vector(2.0, 2.0)
b0 = Vector(1.0, 2.0)
b1 = Vector(2.0, 3.0)
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, T = self.randomDistortion(a0, a1, b0, b1)
assert segmentSegmentIntersection(aa0, aa1, bb0, bb1)[0] == '0'
#===========================================================================
# Intersecting
#===========================================================================
def testIntersectingSegments(self):
a0 = Vector(1.0, 1.0)
a1 = Vector(2.0, 2.0)
b0 = Vector(1.0, 2.0)
b1 = Vector(2.0, 1.0)
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, T = self.randomDistortion(a0, a1, b0, b1)
code, result1, result2 = segmentSegmentIntersection(aa0, aa1, bb0, bb1)
assert code == '1'
assert result1 == result2
assert fuzzyEqual((result1 - T*Vector(1.5, 1.5)).magnitude(), 0.0)
#===========================================================================
# Intersecting at endpoint
#===========================================================================
def testIntersectingSegmentsOnEndpoint(self):
a0 = Vector(1.0, 1.0)
a1 = Vector(2.0, 2.0)
b0 = Vector(1.0, 2.0)
b1 = Vector(1.5, 1.5)
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, T = self.randomDistortion(a0, a1, b0, b1)
code, result1, result2 = segmentSegmentIntersection(aa0, aa1, bb0, bb1)
assert code == 'v'
assert result1 == result2
assert fuzzyEqual((result1 - T*Vector(1.5, 1.5)).magnitude(), 0.0)
#===========================================================================
# Overlapping segments.
#===========================================================================
def testOverlappingSegments(self):
a0 = Vector(1.0, 1.0)
a1 = Vector(2.0, 2.0)
b0 = Vector(3.0, 3.0)
b1 = Vector(1.5, 1.5)
for i in xrange(self.ntests):
aa0, aa1, bb0, bb1, T = self.randomDistortion(a0, a1, b0, b1)
code, result1, result2 = segmentSegmentIntersection(aa0, aa1, bb0, bb1)
assert code == 'e'
assert result1 != result2
if result1.magnitude2() > result2.magnitude2():
result1, result2 = result2, result1
assert fuzzyEqual((result1 - T*Vector(1.5, 1.5)).magnitude(), 0.0)
assert fuzzyEqual((result2 - T*Vector(2.0, 2.0)).magnitude(), 0.0)
if __name__ == "__main__":
unittest.main()
```
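The assertions above exercise a four-way return convention from
`segmentSegmentIntersection`. A minimal sketch, assuming the same Spheral2d
bindings (the coordinates are illustrative):
```python
from Spheral2d import Vector, segmentSegmentIntersection

# Two crossing diagonals -> code '1', with both returned points at the crossing.
code, p1, p2 = segmentSegmentIntersection(Vector(1.0, 1.0), Vector(2.0, 2.0),
                                          Vector(1.0, 2.0), Vector(2.0, 1.0))
assert code == '1' and p1 == p2
# Codes seen in the tests: '0' = no intersection, '1' = proper crossing,
# 'v' = intersection at an endpoint, 'e' = collinear overlap (p1 and p2 then
# bracket the shared sub-segment).
```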
#### File: unit/Utilities/test_uniform_random.py
```python
from math import *
from Spheral import *
from SpheralTestUtilities import *
import unittest
# Create a global random number generator.
import random
rangen = random.Random()
ntests = 1000
class TestRandom01(unittest.TestCase):
#===========================================================================
# Various ways of constructing
#===========================================================================
def testConstructors(self):
seed1 = rangen.randint(1, 2**64)
seed3 = rangen.randint(1, 2**64)
while seed3 == seed1:
seed3 = rangen.randint(1, 2**64)
gen1 = uniform_random(seed1)
gen2 = uniform_random(gen1)
gen3 = uniform_random(seed3)
assert gen1 == gen2
assert gen1 != gen3
for i in xrange(ntests):
assert gen1() == gen2()
#===========================================================================
# seed
#===========================================================================
def testSeed(self):
seed = rangen.randint(1, 2**64)
gen1 = uniform_random(seed)
assert gen1.seed == seed
gen2 = uniform_random()
assert gen1 != gen2
gen2.seed = seed
assert gen2.seed == seed
for i in xrange(ntests):
assert gen1() == gen2()
#===========================================================================
# Comparisons
#===========================================================================
def testComparisons(self):
seed = rangen.randint(1, 2**64)
gen1 = uniform_random(seed)
gen2 = uniform_random(seed + 1)
assert gen1 != gen2
gen2.seed = seed
assert gen1 == gen2
gen3 = uniform_random(seed, 2.0, 3.0)
assert gen3 != gen1
gen1.range(2.0, 3.0)
assert gen3 == gen1
#===========================================================================
# advance
#===========================================================================
def testAdvance(self):
seed = rangen.randint(1, 2**64)
gen1 = uniform_random(seed)
throwaway = [gen1() for i in xrange(ntests)]
vals1 = [gen1() for i in xrange(ntests)]
gen2 = uniform_random(seed)
gen2.advance(ntests)
vals2 = [gen2() for i in xrange(ntests)]
assert vals1 == vals2
#===========================================================================
# range
#===========================================================================
def testRange(self):
seed = rangen.randint(1, 2**64)
gen1 = uniform_random(seed)
assert gen1.min == 0.0
assert gen1.max == 1.0
gen1.range(5.0, 10.0)
assert gen1.min == 5.0
assert gen1.max == 10.0
#===========================================================================
# Serialization
#===========================================================================
def testSerialize(self):
seed = rangen.randint(1, 2**64)
gen1 = uniform_random(seed)
throwaway = [gen1() for i in xrange(ntests)]
buf = vector_of_char()
gen1.serialize(buf)
gen2 = uniform_random()
i = gen2.deserialize(buf, 0)
assert i == len(buf)
vals1 = [gen1() for i in xrange(ntests)]
vals2 = [gen2() for i in xrange(ntests)]
assert vals1 == vals2
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jmikola/drivers-atlas-testing",
"score": 2
} |
#### File: drivers-atlas-testing/astrolabe/poller.py
```python
import logging
from time import sleep
from astrolabe.exceptions import PollingTimeoutError
from .timer import Timer
LOGGER = logging.getLogger(__name__)
class PollerBase:
"""Base class for implementing a poller."""
def __init__(self, *, frequency, timeout):
self.interval = 1.0 / frequency
self.timeout = timeout
@staticmethod
def _check_ready(obj, attribute, args, kwargs):
"""Abstract method that defines the readiness check used during
polling."""
raise NotImplementedError
def poll(self, objects, *, attribute, args, kwargs):
"""Wait for a member of `objects` to become ready. Once a member
is ready, return it to the caller. The values of `attribute`,
        `args` and `kwargs` depend on the readiness check employed by the
implementation."""
timer = Timer()
timer.start()
while timer.elapsed < self.timeout:
logmsg = "Polling {} [elapsed: {:.2f} seconds]"
LOGGER.info(logmsg.format(objects, timer.elapsed))
for obj in objects:
return_value = self._check_ready(obj, attribute, args, kwargs)
if return_value:
return obj
LOGGER.debug("Waiting {:.2f} seconds before retrying".format(
self.interval))
sleep(self.interval)
raise PollingTimeoutError("Polling timed out after %s seconds" % self.timeout)
class BooleanCallablePoller(PollerBase):
"""A poller that selects objects based on the boolean return value of one
its methods."""
@staticmethod
def _check_ready(obj, attribute, args=(), kwargs={}):
"""A readiness check that evaluates to True if the `attribute`
method of the `obj` object returns boolean True when called with
the provided args and kwargs."""
return bool(getattr(obj, attribute)(*args, **kwargs))
def poll(check, timeout, subject):
timer = Timer()
timer.start()
ok = False
while timer.elapsed < timeout:
LOGGER.info('Waiting for %s; elapsed: %.1f sec' % (subject, timer.elapsed))
if check():
ok = True
break
else:
# Prevent unintentional busy loops, always sleep here even if
# the check function takes a non-trivial amount of time
# (e.g. if it performs network I/O).
sleep(1)
if not ok:
raise PollingTimeoutError("Timed out while waiting for %s" % subject)
``` |
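A short usage sketch for `BooleanCallablePoller` above; the `Cluster` class
and its `is_ready` method are hypothetical stand-ins for a real polled object:
```python
from astrolabe.poller import BooleanCallablePoller

class Cluster:
    def __init__(self, name, ready):
        self.name = name
        self._ready = ready

    def is_ready(self):
        # called by the poller via getattr(obj, attribute)(*args, **kwargs)
        return self._ready

poller = BooleanCallablePoller(frequency=1.0, timeout=10.0)
ready = poller.poll([Cluster("a", False), Cluster("b", True)],
                    attribute="is_ready", args=(), kwargs={})
print(ready.name)  # -> "b"
```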
{
"source": "jmikovic/leapp-actors",
"score": 2
} |
#### File: actors/rpmtransactionconfigtaskscollector/actor.py
```python
from leapp.actors import Actor
from leapp.models import RpmTransactionTasks
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
from leapp.libraries.actor.scanner import load_tasks
CONFIGURATION_BASE_PATH = '/etc/leapp/transaction'
class RpmTransactionConfigTasksCollector(Actor):
name = 'rpm_transaction_config_tasks_collector'
description = 'Loads additional Rpm transaction tasks from the {} directory.'.format(CONFIGURATION_BASE_PATH)
consumes = ()
produces = (RpmTransactionTasks,)
tags = (FactsPhaseTag, IPUWorkflowTag)
def process(self):
self.produce(load_tasks(CONFIGURATION_BASE_PATH, self.log))
``` |
{
"source": "jmikovic/leapp-repository",
"score": 2
} |
#### File: initramfs/targetinitramfsgenerator/actor.py
```python
from leapp.actors import Actor
from leapp.libraries.actor import targetinitramfsgenerator
from leapp.models import (
InitrdIncludes, # deprecated
InstalledTargetKernelVersion,
TargetInitramfsTasks,
)
from leapp.tags import IPUWorkflowTag, FinalizationPhaseTag
from leapp.utils.deprecation import suppress_deprecation
@suppress_deprecation(InitrdIncludes)
class TargetInitramfsGenerator(Actor):
"""
Regenerate RHEL-8 initrd and include files produced by other actors
"""
name = 'target_initramfs_generator'
consumes = (InitrdIncludes, InstalledTargetKernelVersion, TargetInitramfsTasks)
produces = ()
tags = (FinalizationPhaseTag, IPUWorkflowTag)
def process(self):
targetinitramfsgenerator.process()
``` |
{
"source": "jmilhone/fabry_perot",
"score": 3
} |
#### File: fabry_perot/bin/plasma_test_file.py
```python
from __future__ import division, print_function
#import sys
#sys.path.append("../")
import numpy as np
import matplotlib.pyplot as plt
from fabry.plasma import plasma
def main():
impact_factor = 0.3
Ti = 0.5
w0 = 488.0
mu = 40.0
Lnu = 50.0
Vouter = 10.0
rmax = 40.0
nr = 201
nlambda=2000
Lne = 2.0
R_outer = 35.0
r = np.linspace(0.0, rmax+5, nr)
v = plasma.pcx_velocity_profile(r, Lnu, R_outer, Vouter)
w, spec = plasma.calculate_pcx_chord_emission(impact_factor, Ti, w0, mu, Lnu, Vouter, nr=401)
fig, ax = plt.subplots()
ax.plot(w, spec)
plt.show()
if __name__ == "__main__":
main()
```
#### File: fabry_perot/bin/process_image.py
```python
from __future__ import division, absolute_import, print_function
from os.path import join, abspath
import argparse
import numpy as np
from fabry.tools import images, plotting, file_io
from fabry.core import fitting, ringsum
import matplotlib.pyplot as plt
import h5py
from scipy.stats import norm
# I should move interpolate_point and check_fwhm to somwhere in the fabry module soon
def interpolate_point((x1,y1), (x2, y2), thres):
slope = (y2-y1)/(x2-x1)
offset = y2 - slope*x2
point = (thres - offset) / slope
return point
def check_fwhm(r, sig):
pk_guess, _ = plotting.ringsum_click(r**2, sig, title='Please click on peaks')
indices = []
max_locs = []
for pk in pk_guess:
indices.append(fitting.determine_fit_range(r**2, sig, pk, thres=0.2))
for idx in indices:
loc = np.argmax(sig[idx])
loc += idx[0]
max_locs.append(loc)
right = []
left = []
idx_right = []
idx_left = []
for loc in max_locs:
half = 0.5 * sig[loc]
iR = np.min(np.where(sig[loc:] < half)) + loc
iL = np.max(np.where(sig[:loc] < half))
r_sq_L = interpolate_point((r[iL]**2, sig[iL]), (r[iL+1]**2, sig[iL+1]), half)
r_sq_R = interpolate_point((r[iR-1]**2, sig[iR-1]), (r[iR]**2, sig[iR]), half)
right.append(r_sq_R)
left.append(r_sq_L)
idx_right.append(iR)
idx_left.append(iL)
print(('Widths (px^2): ', [R-L for (R,L) in zip(right, left)]))
print([r[loc]**2 for loc in max_locs])
print(((r[max_locs[1]]**2 - r[max_locs[0]]**2) / (right[0]-left[0])))
fig, ax = plt.subplots()
ax.plot(r**2/1e6, sig)
for L, R in zip(idx_left, idx_right):
xx = [r[L], r[R]]
xx = [temp**2/1e6 for temp in xx]
yy = [sig[L], sig[L]]
ax.plot(xx, yy, '-k')
rr = [r[max_locs[0]]**2, r[max_locs[1]]**2]
rr = [temp/1e6 for temp in rr]
ss = [sig[max_locs[0]], sig[max_locs[0]]]
ss = [1.1*temp for temp in ss]
ax.plot(rr, ss, '-k')
ax.set_xlabel("R${}^2$ ($10^6$ px${}^2$)", fontsize=16)
ax.set_ylabel("Counts", fontsize=16)
ax.tick_params(labelsize=16)
plt.show()
#def remove_prof(r, sig, sig_sd, pk_guess=None, poly_num=5):
# '''
# removes a polyfit of order minima from ringsum
#
# Args:
# r (np.ndarray): binarr of ringsum
# sig (np.ndarray): ringsum to be subtracted
# poly_num (int, default=5): order used for polyfit
#
# Returns:
# peak_loc (np.ndarray): peak locations in r
# poff (np.ndarray): fit to be divided from sig
# '''
# ret_pk_guess = False
# if pk_guess is None:
# ret_pk_guess = True
# pk_guess, _ = plotting.ringsum_click(r,sig,title='Please click on peaks')
#
# peak_loc = np.zeros(len(pk_guess))
# peak_val = np.zeros(len(pk_guess))
# peak_wts = np.zeros(len(pk_guess))
# for i,pk in enumerate(pk_guess):
# idxs = fitting.determine_fit_range(r, sig, pk, thres=0.1)
# peak_loc[i],peak_val[i] = fitting.find_maximum(r[idxs],sig[idxs],returnval=True)
# peak_wts[i] = 1./(sig_sd[np.abs(peak_loc[i]-r).argmin()])
#
# p,cov = np.polyfit(peak_loc,peak_val,poly_num,w=peak_wts,cov=True)
# poff = np.polyval(p,r)
# poff_sd = np.sqrt(np.sum(np.diag(cov)))
# if ret_pk_guess:
# return pk_guess,poff,poff_sd
# else:
# return poff, poff_sd
def main(fname, bgfname=None, color='b', binsize=0.1, xguess=None,
yguess=None, block_center=False, click_center=True, find_center=True,
sub_prof=False, plotit=False, write=None, npix=1, return_tiff_mean=True,
tiff_image_idx=None):
bgdata = None
data = images.get_data(fname, color=color,
return_mean=return_tiff_mean, image_index=tiff_image_idx)
if npix > 1:
data = ringsum.super_pixelate(data, npix=npix)
if find_center:
if click_center:
xguess,yguess = plotting.center_plot(data)
x0,y0 = ringsum.locate_center(data, xguess=xguess, yguess=yguess,
block_center=block_center, binsize=0.1, plotit=True, printit=True)
if plotit:
fig,ax = plt.subplots(figsize=(10,8))
plotting.ring_plot(data, fax=(fig,ax))
ax.axhline(y0,color='b')
ax.axvline(x0,color='r')
fig.savefig('/home/milhone/fp_shot_2332.png', transparent=True)
plt.show()
else:
if xguess is None:
x0 = data.shape[1]/2.
else:
x0 = xguess
if yguess is None:
y0 = data.shape[0]/2.
else:
y0 = yguess
    print('Performing Annular Sum...')
r, sig0,sig0_sd = ringsum.ringsum(data,x0,y0, use_weighted=False, quadrants=False, binsize=binsize, remove_hot_pixels=True)
if bgfname is not None:
print('Removing background...')
if bgdata is None:
bgdata = images.get_data(bgfname, color=color,
return_mean=return_tiff_mean, image_index=tiff_image_idx)
if npix > 1:
bgdata = ringsum.super_pixelate(bgdata, npix=npix)
_, bg,bg_sd = ringsum.ringsum(bgdata,x0,y0, use_weighted=False, binsize=binsize, remove_hot_pixels=True)
sig = sig0 - bg
sig_sd = np.sqrt(sig0_sd**2+bg_sd**2)
else:
sig,sig_sd = sig0,sig0_sd
dic = {'fname': abspath(fname), 'color': color, 'center': (x0, y0),
'binsize': binsize, 'r': r, 'sig': sig, 'sig_sd': sig_sd}
if bgfname is not None:
dic['bg_fname'] = abspath(bgfname)
    if sub_prof:
        # the profile-subtraction helper (remove_prof) is commented out above,
        # so there is no peak guess to record; keep the key but leave it empty
        dic['pk_guess'] = None
print('done!')
if write is not None:
file_io.prep_folder(write)
file_io.dict_2_h5(join(write,'ringsum.h5'),dic)
if write is not None or plotit:
fig,axs = plt.subplots(2,1,figsize=(12,8))
axs[0].errorbar(r, sig, yerr=sig_sd, fmt='-', errorevery=5, color='r', lw=2, zorder=10, ecolor='b')
axs[0].axhline(0,color='k',alpha=0.7)
axs[0].set_title('binsize={0}'.format(binsize))
axs[1].errorbar(r, sig0, yerr=sig0_sd, fmt='-', errorevery=5, lw=2,label='raw',zorder=10, color='C0', ecolor='C1')
axs[1].axhline(0,color='k',alpha=0.7)
if bgfname is not None:
axs[1].errorbar(r, bg, yerr=bg_sd, fmt='-', errorevery=5, lw=2,label='background',zorder=10, color='C2', ecolor='C3')
axs[1].set_xlabel('R (px)')
axs[1].legend()
if write:
file_io.prep_folder(join(write,'plots'))
plt.savefig(join(write,'plots','ringsum.png'),bbox_inches='tight')
if plotit:
plt.show()
else:
plt.close()
fig, ax = plt.subplots()
#ax.plot(r**2, sig, 'C1')
ax.errorbar(r, sig/1000, yerr=sig_sd/1000, color='C1')
#ax.plot(r, sig/1000.0, color='C1')
#ax.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
#ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
#ax.set_xlabel(r"R${}^2$ (px${}^2$)", fontsize=18)
ax.set_xlabel("R (px)", fontsize=18)
ax.set_ylabel("Counts ($10^3$)", fontsize=18)
ax.tick_params(labelsize=18)
fig.tight_layout()
#fig.savefig('fp_ringsum_2332.pdf', transparent=True)
plt.show()
#
fig, ax = plt.subplots()
ax.plot(r, sig_sd / sig)
plt.show()
#check_fwhm(r, sig-sig.min())
return dic
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='stores image ringsum into Data folder')
parser.add_argument('fname',type=str,help='NEF filename of image you wish to process')
parser.add_argument('--background','-bg',type=str,default=None,help='NEF filename of\
background image')
parser.add_argument('--color', '-c', type=str, default='b', help='r,g,b color to use\
when ringsumming image: default is b')
    parser.add_argument('--binsize', '-b', type=float, default=0.1, help='binsize for fine\
            ringsum: default is 0.1')
parser.add_argument('-xy', type=float, nargs='+', default=None, help='x and y guess for\
the center of image, if not provided the center of the ccd is assumed')
    parser.add_argument('--no_search', action='store_true', help='suppresses the center finding\
            algorithm and just uses the provided xy')
    parser.add_argument('--no_click', action='store_true', help='suppresses the interactive\
            click plot for getting a center guess')
parser.add_argument('--block', '-bk', action='store_true', help='sets a 600x600 square\
block centered on xyguess for center finding')
parser.add_argument('--sub_prof', '-sp', action='store_true', help='perform a polyfit\
profile subtraction, launches an interactive plot')
    parser.add_argument('--no_plot', action='store_true', help='suppresses plots')
parser.add_argument('--write', '-w', type=str, default=None, help='saves data and plot to\
specified folder, default is None which will not write data.')
parser.add_argument('--npix', type=int, default=1, help='Super pixel size, default is 1, i.e. no superpixel')
parser.add_argument('--tiff_image_index', type=int, default=None,
help='if image is a tiff stack, this specifies the index to process')
parser.add_argument('--return_tiff_mean', action='store_true',
help='if image is a tiff stack, this returns the mean over the stack. Overrides tiff_image_index')
args = parser.parse_args()
plotit = not args.no_plot
click_center = not args.no_click
find_center = not args.no_search
if args.xy is None:
xguess = None
yguess = None
else:
xguess, yguess = args.xy
_ = main(args.fname, bgfname=args.background, color=args.color, binsize=args.binsize,
block_center=args.block, sub_prof=args.sub_prof,
write=args.write, plotit=plotit, click_center=click_center, xguess=xguess,
yguess=yguess, find_center=find_center, npix=args.npix,
return_tiff_mean=args.return_tiff_mean, tiff_image_idx=args.tiff_image_index)
```
#### File: fabry_perot/bin/run_multi_solver.py
```python
from __future__ import division, print_function
#import sys
import os.path as path
from distutils.util import strtobool
from mpi4py import MPI
#sys.path.append("../")
from fabry.tools import file_io
import numpy as np
import argparse
import ConfigParser
import time
def verify_restart():
a = raw_input("Are you sure you want to restart? ")
try:
a = bool(strtobool(a))
except ValueError:
print("invalid input exiting...")
sys.exit(1)
if a:
print("ok moving on with restart")
restart = True
else:
print("Ok, overwriting restart to False")
restart = False
return restart
def clean_filepath(filepath):
clean_path = path.expanduser(filepath)
clean_path = path.abspath(clean_path)
return clean_path
def read_calibration(calibration_folder):
posterior = np.loadtxt(path.join(calibration_folder, 'full_post_equal_weights.dat'), ndmin=2)
L_posterior = posterior[:, 0]
d_posterior = posterior[:, 1]
F_posterior = posterior[:, 2]
return L_posterior, d_posterior, F_posterior
def parse_config(config_filename):
config = ConfigParser.ConfigParser()
config.read(clean_filepath(config_filename))
sections = config.sections()
locs = []
folders = []
for section in sections:
locs.append(float(section))
folders.append(clean_filepath(config.get(section, 'path')))
return locs, folders
if __name__ == "__main__":
start_time = time.time()
Comm = MPI.COMM_WORLD
rank = Comm.Get_rank()
if rank == 0:
        parser = argparse.ArgumentParser(description='Runs the Ti and PCX velocity-profile solver for Ar using multiple images.')
parser.add_argument('config', type=str,
help='Filepath of configuration file')
parser.add_argument('cal_folder', metavar='calibration-folder', type=str,
help='Folder containing the fabry-perot calibration')
parser.add_argument('out_folder', metavar='output-folder',
type=str, help='Folder to place output files in')
parser.add_argument('filter_type', metavar='filter-type',type=str,
help='Name of filter (Ar, He, ar, he, ...)')
parser.add_argument('--restart', action='store_true', default=False,
help="Set to True if you want MultiNest to start over instead of resuming")
args = parser.parse_args()
if args.filter_type.lower() in ['ar', 'argon', '488']:
filter_type = 'argon'
elif args.filter_type.lower() in ['he', 'helium', '468.6']:
filter_type = 'helium'
else:
raise NotImplementedError('Filter {0:s} not recognized.'.format(args.filter_type))
restart = args.restart
if restart:
restart = verify_restart()
output_folder = clean_filepath(args.out_folder)
config_filename = clean_filepath(args.config)
chord_locs, folders = parse_config(config_filename)
for loc, folder in zip(chord_locs, folders):
print(loc, folder)
cal_folder = clean_filepath(args.cal_folder)
L_post, d_post, F_post = read_calibration(cal_folder)
solver_in = {'Lpost': L_post,
'dpost': d_post,
'Fpost': F_post,
'chord_locs': chord_locs,
'folders': folders,
'output_folder':output_folder,
'restart': restart,
'filter': filter_type,
}
else:
solver_in = None
solver_in = Comm.bcast(solver_in, root=0)
if solver_in is not None:
# run everything
import fabry.plasma.argon_plasma_solver as solver
Lpost = solver_in['Lpost']
dpost = solver_in['dpost']
Fpost = solver_in['Fpost']
chord_locs = solver_in['chord_locs']
folders = solver_in['folders']
output_folder = solver_in['output_folder']
restart = solver_in['restart']
if solver_in['filter'] == 'argon':
solver.multi_image_solver(output_folder, chord_locs, folders, Lpost, dpost, Fpost, test_plot=False, resume=not restart)
elif solver_in['filter'] == 'helium':
raise NotImplementedError("Helium is not implemented yet")
else:
raise ValueError("How did we get here?")
if rank == 0:
end_time = time.time()
print("Total Time Elasped: {} minutes".format((end_time-start_time)/60.0))
# # run plasma solver check here
# if solver_in['filter'] == 'argon':
# import fabry.plasma.check_argon_solver as checker
# checker.check_multi_solver(output_folder, chord_locs, folders, Lpost, dpost, Fpost)
# elif solver_in['filter'] == 'helium':
# raise NotImplementedError("Helium is not implemented yet")
# else:
# raise ValueError("How did we get here?")
```
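For reference, `parse_config` above treats every INI section name as a chord
location (cast to float) and reads one `path` option per section. A
hypothetical configuration file would look like:
```python
# contents of a hypothetical multi_solver.cfg
example_cfg = """
[15.0]
path = ~/data/shot/chord_15

[25.0]
path = ~/data/shot/chord_25
"""
```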
#### File: fabry_perot/bin/write_finesse_config.py
```python
from __future__ import print_function, division
from future.builtins import input
import json
import argparse
from os.path import abspath
def get_F_limits():
input_string = "Please enter finesse (F) limits: "
F_lim = get_and_parse_user_input(input_string, float, n_elements=2)
return list(F_lim)
def get_Arel_limits():
input_string = "Please enter Th I relative amplitude (Arel) limits: "
Arel_lim = get_and_parse_user_input(input_string, float, n_elements=2)
return list(Arel_lim)
def get_Ti_limits():
input_string = "Please enter Ar II Ti limits: "
Ti_lim = get_and_parse_user_input(input_string, float, n_elements=2)
return list(Ti_lim)
def get_wavelengths():
input_string = "Please enter all of extra Th I wavelengths to consider: "
wavelengths = get_and_parse_user_input(input_string, float)
return list(wavelengths)
def get_extra_rel_amplitudes(wavelengths):
input_string = "Please enter the relative amplitude limits for {0:f} nm: "
amps = []
for w in wavelengths:
rel_amp = get_and_parse_user_input(input_string.format(w), float, n_elements=2)
amps.append(list(rel_amp))
return amps
def get_and_parse_user_input(input_string, type_to_cast, n_elements=None):
user_input = input(input_string)
user_input = user_input.split(",")
    if len(user_input[0]) == 0:
        # empty response: the user skipped this prompt, so return no values
        return []
if n_elements:
# we know how many elements we need
return (type_to_cast(x) for _, x in zip(range(n_elements), user_input))
else:
return (type_to_cast(x) for x in user_input)
def main(fname):
print("*** All user inputs should be comma separated ***")
F_lim = get_F_limits()
Arel_lim = get_Arel_limits()
Ti_lim = get_Ti_limits()
w0 = get_wavelengths()
if w0:
amps = get_extra_rel_amplitudes(w0)
else:
amps = []
config = {'F_lim': F_lim,
'Arel_lim': Arel_lim,
'Ti_lim': Ti_lim,
'w_extra': w0,
'Arel_extra': amps,
}
with open(fname, 'w') as f:
json.dump(config, f, indent=4)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Writes prior information for the MultiNest finesse solver to a JSON file.')
parser.add_argument('filename', type=str, help='file to write json data to')
args = parser.parse_args()
fname = abspath(args.filename)
main(fname)
```
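The JSON written by `main` above has this shape (values are purely
illustrative, not recommended priors):
```python
example = {
    "F_lim": [15.0, 25.0],        # finesse prior limits
    "Arel_lim": [0.1, 0.7],       # Th I relative-amplitude limits
    "Ti_lim": [0.025, 1.0],       # Ar II ion temperature limits (eV)
    "w_extra": [487.8],           # extra Th I wavelengths (nm)
    "Arel_extra": [[0.01, 0.1]],  # one amplitude-limit pair per extra line
}
```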
#### File: fabry/core/models.py
```python
from __future__ import absolute_import, division, print_function
from collections import Iterable
import numpy as np
from scipy.integrate import trapz
from .zeeman import zeeman_lambda
from numba import jit
import os.path as path
try:
import matplotlib.pyplot as plt
except ImportError:
pass
@jit(nopython=True)
def trapezoidal_integration(y, x):
"""Performs trapezoidal intergration
Args:
y (Union[list, np.ndarray]): y data points
x (Union[list, np.ndarray]): x data points
Returns:
float: area under the curve y(x)
"""
n = len(x)
area = 0.0
for i in xrange(n - 1):
area += (x[i + 1] - x[i]) * (y[i + 1] + y[i])
return area / 2.0
@jit
def peak_calculator(L, d, w, order):
"""
Simple peak calculator for ideal Fabry-Perot.
.. math::
r_j = L \sqrt{ \left( \\frac{2d/w}{\\rm{Floor}(2d/w)-j}\\right)^2 - 1}
Args:
L (float): L value, units will set return units (pixels or mm)
d (float): etalon spacing in mm
w (float): peak wavelength in nm
order (Union[np.ndarray,int]): j order number. 0 gives order nearest center
of rings, increasing j as you move out.
Returns:
Union[np.ndarray, float]: the location of the peak(s)
in the same units as L with the same length as order
"""
m = 2.e6 * d / w
m0 = np.floor(m)
return L * np.sqrt(m ** 2 / (m0 - order) ** 2 - 1.0)
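# Example (illustrative numbers, not a calibration): with L in pixels, d in
# mm, and w in nm, order j = 0 is the ring nearest the pattern center and the
# radii grow with j:
#   peak_calculator(37500.0, 0.88, 487.98634, np.arange(4))
#   -> radii of the first four orders, in pixels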
@jit
def airy_func(wavelength, cos_th, d, F):
"""
Computes the Airy function (ideal Fabry-Perot instument function)
as a function of wavelength and cos(theta) with parameters d and F
.. math::
A = \left(1 + Q \sin^2(\pi \\frac{2d}{w} \cos\\theta)\\right)^{-1}
.. math::
Q = \left(\\frac{2 \mathcal{F} }{ \pi } \\right)^2
Args:
wavelength (np.ndarray): wavelength array in nm
cos_th (np.ndarray): cos(theta) array
d (float): etalon spacing in mm
F (float): etalon finesse
Returns:
np.ndarray: evaluated airy function
"""
Q = (2. * F / np.pi) ** 2
airy = 1.0 / (1.0 + Q * np.sin(np.pi * 2.e6 * d * cos_th / wavelength) ** 2)
return airy
@jit
def doppler_calc(w0, mu, temp, v):
"""
Computes the doppler broadening sigma and the new central wavelength
from the doppler shift
.. math::
\sigma = w_0 \sqrt{\\frac{k_B T }{ mc^2}}
.. math::
w = w_0 (1 - v/c)
Args:
w0 (float): unshifted wavelength in nm
mu (float): atomic mass in amu
temp (float): temperature in eV
v (float): velocity in m/s
Returns:
(float, float)): sigma in nm, shifted wavelength in nm
"""
sigma = w0 * 3.2765e-5 * np.sqrt(temp / mu)
w = w0 * (1.0 - 3.336e-9 * v)
return sigma, w
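# Worked example: for the Ar II line w0 = 487.98634 nm (mu = 39.948 amu) at
# temp = 0.5 eV and v = 0, the thermal width is
#   sigma = 487.98634 * 3.2765e-5 * sqrt(0.5/39.948) ~ 1.8e-3 nm,
# while a 1 km/s flow shifts the line by w0 * 3.336e-9 * 1e3 ~ 1.6e-3 nm.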
@jit
def doppler_shift(w0, v):
return w0 * (1.0 - 3.336e-9 * v)
@jit
def doppler_broadening(w0, mu, temp):
return w0 * 3.2765e-5 * np.sqrt(temp / mu)
@jit
def gaussian(wavelength, w, sigma, amp=1., norm=True):
"""
Computes a gaussian for a given central wavelength, sigma and amp
.. math::
G = \\frac{A}{\sigma \sqrt{2 \pi}} \exp{\left( \\frac{ (w - w_0)^2 }{2 \sigma^2 } \\right) }
Args:
wavelength (np.ndarray): wavelength array to calculate spec on
w (float): central wavelength (same units as wavelength array)
sigma (float): sigma for gaussian (same units as w)
amp (float): amplitude of spectrum, default=1.0
        norm (bool): if True, the Gaussian is normalized to integrate to 1
            over (-inf, inf) before the amp factor is applied; default=True
Returns:
np.ndarray: spectrum evaluated on wavelength array
"""
if norm:
norm = 1. / (sigma * np.sqrt(2. * np.pi))
else:
norm = 1.
exp = np.exp(-0.5 * (wavelength - w) ** 2 / sigma ** 2)
return amp * norm * exp
def lorentzian(wavelength, w, gamma, amp=1.):
"""
Computes a lorentzian for a given central wavelength, gamma and amp
.. math::
L = \\frac{A}{2 \pi} \\frac{\gamma }{ (w-w_0)^2 + (\gamma/2)^2}
Args:
wavelength (np.array): wavelength array to calculate spec on
w (float): central wavelength (same units as wavelength array)
gamma (float): lorentzian gamma parameter
amp (float, default=1.): amplitude in addition to one that integrates
spec to 1 over infinity
Returns:
spec (np.ndarray): spectrum evaluated on wavelength array
"""
A = (amp * 0.5 * gamma) / np.pi
return A / ((wavelength - w) ** 2 + (0.5 * gamma) ** 2)
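# Both line shapes integrate to amp over (-inf, inf); for comparable widths
# the Lorentzian has much heavier tails, e.g. (illustrative):
#   w = np.linspace(487.5, 488.5, 2001)
#   g = gaussian(w, 488.0, 0.01)    # sigma in nm
#   l = lorentzian(w, 488.0, 0.02)  # gamma (FWHM) in nm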
def offset_forward_model(r, L, d, F, w0, mu, amp, temp, v, nlambda=1024, sm_ang=False, coeff=0.15, Ip=None, Id=None):
"""Forward q with an attempt to q the 'offset' from nuissance lines
Args:
r (np.ndarray): array of r values to compute q on
L (float): camera lens focal length, same units as r (pixels or mm)
d (float): etalon spacing (mm)
F (float): etalon finesse
w0 (Union[float, list]): central wavelength(s) in nm
mu (Union[float, list]): mass(es) in amu
amp (Union[float, list]): amplitude(s) for the lines
temp (Union[float, list]): temperature(s) in eV
v (Union[float, list]): velocities in m/s
nlambda (int): number of points in wavelength array, default=1024
sm_ang (bool): use the small angle approx or not, default=True
        coeff (float): coefficient setting the relative amplitude of all the nuisance lines
Returns:
        np.ndarray: forward model evaluated at each r (same length as r)
"""
# print(L, d, F, w0, mu, amp, temp, v)
# print(nlambda, sm_ang, coeff)
vals = forward_model(r, L, d, F, w0, mu, amp, temp, v, nlambda=nlambda)
# vals += max(amp) * coeff / (1.0 + F)
if Ip is not None and Id is not None:
# prioritize using my new offset model over arbitrary coefficient
Q = (2. * F / np.pi) ** 2
offset = -Ip/Q + (1.0+1.0/Q)*Id
vals += offset
else:
vals += np.max(amp) * coeff / (1.0 + (2.0 * F / np.pi) ** 2)
return vals
@jit
def forward_model(r, L, d, F, w0, mu, amp, temp, v, nlambda=1024):
"""
Convolves the Doppler spectrum with the ideal Fabry-Perot Airy function.
Args:
        r (np.ndarray): array of r values to compute the model on
L (float): camera lens focal length, same units as r (pixels or mm)
d (float): etalon spacing (mm)
F (float): etalon finesse
w0 (Union[float, list]): central wavelength(s) in nm
mu (Union[float, list]): mass(es) in amu
amp (Union[float, list]): amplitude(s) for the lines
temp (Union[float, list]): temperature(s) in eV
v (Union[float, list]): velocities in m/s
nlambda (int): number of points in wavelength array, default=1024
Returns:
        np.ndarray: forward model evaluated at each r (same length as r)
"""
# if type(w0) in [list, tuple]:
# if not all([type(x) in [list,tuple] for x in [mu, amp, temp, v]]):
# raise ValueError('need to have a list for all spec params')
# if not all([len(x) == len(w0) for x in [mu, amp, temp, v]]):
# raise ValueError('spec params are not all the same length')
if isinstance(w0, Iterable):
# if not all(isinstance(x, Iterable) for x in [mu, amp, temp, v]):
# raise ValueError('Need to have a iterable for all spec params')
# if not all(len(x) == len(w0) for x in [mu, amp, temp, v]):
# raise ValueError('spec params are not all the same length')
sigma = []
w = []
for i, ww in enumerate(w0):
width, new_w = doppler_calc(ww, mu[i], temp[i], v[i])
sigma.append(width)
w.append(new_w)
# wavelength = np.linspace(min(w) - 10.*max(sigma), max(w) + 10.*max(sigma), nlambda)[:,np.newaxis]
wavelength = np.linspace(min(w) - 10. * max(sigma), max(w) + 10. * max(sigma), nlambda) # .reshape(nlambda, 1)
spec = 0.0
for idx, ww in enumerate(w):
spec += gaussian(wavelength, ww, sigma[idx], amp[idx])
else:
# if not all([type(x) not in [list,tuple] for x in [mu, amp, temp, v]]):
# raise ValueError('need to have a list or not for all spec params')
# if any(isinstance(x, Iterable) for x in [mu, amp, temp, v]):
# raise ValueError('all spec params must be an instance of Iterable or not an instance, no mixing')
sigma, w = doppler_calc(w0, mu, temp, v)
wavelength = np.linspace(w - 10. * sigma, w + 10. * sigma, nlambda) # [:,np.newaxis]
# wavelength = np.linspace(w - 10.*sigma, w + 10.*sigma, nlambda).reshape(nlambda, 1)
spec = gaussian(wavelength, w, sigma, amp)
# sigma, w = doppler_calc(w0, mu, temp, v)
# wavelength = np.linspace(w - 10.*sigma, w + 10.*sigma, nlambda)#[:,np.newaxis]
# spec = gaussian(wavelength, w, sigma, amp)
# if sm_ang:
# cos_th = 1.0 - 0.5 * (r/L)**2
# else:
# cos_th = L / np.sqrt(L**2 + r**2)
cos_th = L / np.sqrt(L ** 2 + r ** 2)
# cos_th = cos_th.reshape((1,len(r)))
# cos_th = cos_th[np.newaxis, :]
# q = trapz(spec*airy_func(wavelength, cos_th, d, F), wavelength, axis=0)
model = np.zeros_like(cos_th)
for idx, cos in enumerate(cos_th):
# print(trapz(spec*airy_func(wavelength, cos, d, F), wavelength).shape)
# q[idx] = trapz(spec*airy_func(wavelength, cos, d, F), wavelength)
model[idx] = trapezoidal_integration(spec * airy_func(wavelength, cos, d, F), wavelength)
return model
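# --- Illustrative usage sketch (added for clarity; every value below is made up) ---
# A single Ar II line at 487.98634 nm; r and L are in pixels (a 150 mm lens with
# 0.004 mm pixels gives L = 37500 px). Positional arguments keep the call
# friendly with older numba releases.
def _demo_forward_model():
    r = np.linspace(0.0, 900.0, 100)
    return forward_model(r, 37500.0, 0.88, 21.0, 487.98634, 39.948, 1.0, 0.5, 0.0)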
def match_finesse_forward(r, L, d, F, temp, v, errtemp=None, w0=487.98634, mu=39.948):
sigma, w = doppler_calc(w0, mu, temp, v * 1000.)
if errtemp is not None:
errsigma, _ = doppler_calc(w0, mu, errtemp, v * 1000.)
sigma = np.sqrt(sigma ** 2 + errsigma ** 2)
wavelength = np.linspace(w - 10. * sigma, w + 10. * sigma, 512)[:, np.newaxis]
spec = gaussian(wavelength, w, sigma, norm=False)
cos_th = 1.0 - 0.5 * (r / L) ** 2
model = trapz(spec * airy_func(wavelength, cos_th, d, F), wavelength, axis=0)
return model
def lyon_temp_forward(r, L, d, F, current, T, V, E=None):
w0 = 487.98634
mu = 39.948
# my previous calculation ###
# zeeman_fac = [-1.4, -19./15., -17./15., -1., 1., 17./15., 19./15., 1.4]
# zeeman_amp = [1.0, 3.0, 6.0, 10.0, 6.0, 3.0, 1.0]
# Victor's calculation ###
zeeman_fac = [-1., -17. / 15., -19. / 15., -1.4, 1.4, 19. / 15., 17. / 15., 1.]
zeeman_amp = [20., 12., 6., 2., 2., 6., 12., 20.]
B = (0.0133 / 80.) * current
sblah, w = doppler_calc(w0, mu, T, V * 1.e3)
if E is not None:
eblah, _ = doppler_calc(w0, mu, E, V * 1.e3)
sblah = np.sqrt(sblah ** 2 + eblah ** 2)
lambdas, amps = zeeman_lambda(w, B, zeeman_fac, amps=zeeman_amp)
mn = w - 10. * sblah
mx = w + 10. * sblah
wavelength = np.linspace(mn, mx, 1024, endpoint=True)[:, np.newaxis]
spec = 0.
for l, a in zip(lambdas, amps):
sigma, _ = doppler_calc(l, mu, T, 0.0) # already did the velocity shift
if E is not None:
esigma, _ = doppler_calc(l, mu, E, 0.0)
sigma = np.sqrt(sigma ** 2 + esigma ** 2)
spec += gaussian(wavelength, l, sigma, amp=a, norm=False)
cos_th = 1.0 - 0.5 * (r / L) ** 2
model = trapz(spec * airy_func(wavelength, cos_th, d, F), wavelength, axis=0)
return model
# def lyon_temp_forward_prof(r,L,d,F,current,T,V):
def model_with_velocity_profile(r, L, d, F, T, vel_profile, dens_profile=None, zarr=None):
w0 = 487.98634
mu = 39.948
if dens_profile is None:
dens_profile = np.ones_like(vel_profile)
else:
dens_profile = np.asarray(dens_profile)
if len(dens_profile) == 1:
dens_profile = np.ones_like(vel_profile)
nV = len(vel_profile)
nW = 2000
vmax = np.max(vel_profile)
w_max_shifted = doppler_shift(w0, vmax)
sigma_Ti = doppler_broadening(w_max_shifted, mu, T)
w_arr = np.linspace(w_max_shifted-10*sigma_Ti, w_max_shifted+10*sigma_Ti, nW)
# fig, ax = plt.subplots()
# ax.plot(zarr, dens_profile, label='dens')
# ax.plot(zarr, vel_profile / vmax, label='v')
# ax.legend()
# plt.show()
spectra = np.zeros((nV, nW))
fig, ax = plt.subplots()
for idx, vel in enumerate(vel_profile):
wshift = doppler_shift(w0, vel)
sigma = doppler_broadening(wshift, mu, T)
spectra[idx, :] = gaussian(w_arr, wshift, sigma, amp=dens_profile[idx]**2, norm=False)
ax.plot(w_arr, spectra[idx, :])
plt.show()
if zarr is None:
total_spectra = np.sum(spectra, axis=0)
else:
total_spectra = np.trapz(spectra, zarr, axis=0)
new_sigma_Ti = doppler_broadening(w_max_shifted, mu, T)
test_spectra = gaussian(w_arr, w_max_shifted, new_sigma_Ti, norm=False)
fig, ax = plt.subplots()
i = np.argmax(total_spectra)
j = np.argmax(test_spectra)
ax.plot(w_arr, total_spectra / total_spectra.max(), label='v prof')
ax.plot(w_arr-(w_arr[j]-w_arr[i]), test_spectra / test_spectra.max(), label='test')
ax.legend()
plt.show()
def zeeman_with_arb_nv(r, L, d, F, current, temp, vbulk, vincrease, extra_temp=None):
w0 = 487.98634
mu = 39.948
# Victor's calculation ###
zeeman_fac = [-1., -17. / 15., -19. / 15., -1.4, 1.4, 19. / 15., 17. / 15., 1.]
zeeman_amp = [20., 12., 6., 2., 2., 6., 12., 20.]
current_dir = path.abspath(path.join(__file__, ".."))
b_data = np.genfromtxt(path.join(current_dir, "lyon_magnetic_field.csv"), delimiter=",")
z = b_data[:, 0]
bz = b_data[:, 1]
    bz /= 10000.0  # Convert G to T
# Adjust bz for the current in the coil
bz *= current / 80.0
# I only want to deal with the array where the plasma is emitting
zbounds = [-30.0, 80.0] # Victor says that plasma exists here
i_lower = np.abs(z-zbounds[0]).argmin()
i_higher = np.abs(z-zbounds[1]).argmin()
sl = slice(i_lower, i_higher)
z = z[sl]
bz = bz[sl]
density = 0.25 * (np.tanh(0.25*z)+1) + 0.5
vel = vbulk * np.ones_like(z)
idx = np.where(z<0.0)
vel[idx] = vbulk - vincrease * z[idx] / 30.0
nz = len(z)
nw = 2048
spectrum = np.zeros((nz, nw))
sigma_Ti = doppler_broadening(w0, mu, temp)
wshift = doppler_shift(w0, np.max(vel))
sigma_extra = 0.0
if extra_temp is not None:
sigma_extra = doppler_broadening(w0, mu, extra_temp)
sigma_Ti = np.sqrt(sigma_Ti**2 + sigma_extra**2)
warr = np.linspace(wshift-10*sigma_Ti, wshift+10*sigma_Ti, nw)
for idx, (zz, bb, vv, ne) in enumerate(zip(z, bz, vel, density)):
w_main_shift = doppler_shift(w0, vv)
w_zee, a_zee = zeeman_lambda(w_main_shift, bb, zeeman_fac, amps=zeeman_amp)
for wz, az in zip(w_zee, a_zee):
# calculate sigma_Ti
sigma_Ti = doppler_broadening(wz, mu, temp)
sigma_Ti = np.sqrt(sigma_Ti**2 + sigma_extra**2)
spectrum[idx, :] += gaussian(warr, wz, sigma_Ti, amp=az, norm=False) * ne**2
final_spectrum = np.trapz(spectrum, z, axis=0)
cos_theta = L / np.sqrt(L**2 + r**2)
cos_theta.shape = (1, len(r))
final_spectrum.shape = (nw, 1)
warr.shape = (nw, 1)
airy = airy_func(warr, cos_theta, d, F)
zee_model = np.trapz(final_spectrum * airy, warr, axis=0)
return zee_model
def zeeman_with_lyon_profile(r, L, d, F, current, temp, vel, extra_temp=None):
w0 = 487.98634
mu = 39.948
# Victor's calculation ###
zeeman_fac = [-1., -17. / 15., -19. / 15., -1.4, 1.4, 19. / 15., 17. / 15., 1.]
zeeman_amp = [20., 12., 6., 2., 2., 6., 12., 20.]
current_dir = path.abspath(path.join(__file__, ".."))
b_data = np.genfromtxt(path.join(current_dir, "lyon_magnetic_field.csv"), delimiter=",")
z = b_data[:, 0]
bz = b_data[:, 1]
# I only want to deal with the array where the plasma is emitting
zbounds = [0.0, 80.0] # Victor says that plasma exists here
i_lower = np.abs(z-zbounds[0]).argmin()
i_higher = np.abs(z-zbounds[1]).argmin()
sl = slice(i_lower, i_higher)
z = z[sl]
bz = bz[sl]
    bz /= 10000.0  # Convert G to T
# Adjust bz for the current in the coil
bz *= current / 80.0
nz = len(z)
nw = 2048
spectrum = np.zeros((nz, nw))
sigma_Ti = doppler_broadening(w0, mu, temp)
w = doppler_shift(w0, vel)
# Extra broadening from defocusing the camera lens
if extra_temp:
sigma_extra = doppler_broadening(w0, mu, extra_temp)
sigma_Ti = np.sqrt(sigma_Ti**2 + sigma_extra**2)
w_arr = np.linspace(w - 10*sigma_Ti, w + 10*sigma_Ti, nw)
# Need to loop over reach z location
for idx, (zz, bb) in enumerate(zip(z, bz)):
# calculate the spectrum here
w_zee, a_zee = zeeman_lambda(w, bb, zeeman_fac, amps=zeeman_amp)
for wz, az in zip(w_zee, a_zee):
spectrum[idx, :] += gaussian(w_arr, wz, sigma_Ti, amp=az, norm=False)
final_spectrum = np.trapz(spectrum, z, axis=0)
cos_theta = L / np.sqrt(L**2 + r**2)
cos_theta.shape = (1, len(r))
final_spectrum.shape = (nw, 1)
w_arr.shape = (nw, 1)
airy = airy_func(w_arr, cos_theta, d, F)
zee_model = np.trapz(final_spectrum * airy, w_arr, axis=0)
return zee_model
@jit(nopython=True)
def general_model(r, L, d, F, wavelength, emission):
cos_th = L / np.sqrt(L ** 2 + r ** 2)
# cos_th = 1.0 - 0.5 * (r/L)**2
model = np.zeros_like(cos_th)
for idx, cos in enumerate(cos_th):
airy = airy_func(wavelength, cos, d, F)
model[idx] = trapezoidal_integration(airy*emission, wavelength)
# cos_th = cos_th.reshape((1, len(r)))
# emis = emission[:, np.newaxis]
# w = wavelength[:, np.newaxis]
# # print('cos shape', cos_th.shape)
# # print('w shape', w.shape)
# # print('emis shape', emis.shape)
# airy = airy_func(w, cos_th, d, F)
# # print('airy shape', airy.shape)
# model = trapz(emis * airy, w, axis=0)
return model
```
#### File: fabry/core/ringsum.py
```python
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing as mp
from numba import jit
"""
Core module contains ringsum codes that are the basis of this
analysis.
Functions:
get_binarr: returns binarr for ringsums
smAng_ringsum: main ringsum function, uses small angle approx.
proper_ringsum: additional ringsum function that makes no approx.
locate_center: center finding function
new_ringsum: best ringsum to use currently
"""
def quick_gaussian_peak_finder(x, y):
i_max = y.argmax()
sl = slice(i_max-10, i_max+11)
p0 = np.polyfit(x[sl], np.log(y[sl]), 2)
return -p0[1] / p0[0] / 2.0
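# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The peak finder fits a parabola to log(y) around the maximum; for gaussian
# data log(y) is exactly quadratic, so the fitted vertex recovers the center.
def _demo_peak_finder():
    x = np.linspace(0.0, 100.0, 501)
    y = np.exp(-0.5 * (x - 42.3) ** 2 / 5.0 ** 2)
    print('recovered peak (expect ~42.3):', quick_gaussian_peak_finder(x, y))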
def get_bin_edges(dat, x0, y0, binsize=0.1):
"""Returns equal area annuli bins for a ring image given a center and binsize
.. math::
        r_n = \sqrt{n \left(2 r_N - \Delta \right) \Delta } \quad n \in 0,...,N
.. math::
\Delta = \\text{ binsize}
Args:
dat (np.ndarray): pixel values for the camera image
x0 (float): x location (pixels) of the center of the image
y0 (float): y location (pixels) of the center of the image
binsize (float, optional): smallest radial bin size (occurs at the largest bin)
Returns:
np.ndarray: bins for the ring sum in pixels
"""
ny, nx = dat.shape
x = np.array([0, nx - 1]) - x0
y = np.array([0, ny - 1]) - y0
xmin = np.abs(x).min()
ymin = np.abs(y).min()
ri = np.min([xmin, ymin])
imax = int(np.floor(ri ** 2 / (2 * ri - binsize) / binsize))
i = np.linspace(0, imax, imax + 1)
redges = np.sqrt(i * (2 * ri - binsize) * binsize)
return redges
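# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Every annulus defined by consecutive edges encloses the same area,
# pi * (r_{n+1}**2 - r_n**2) = constant, which is what makes the binning fair.
def _demo_equal_area_bins():
    dat = np.zeros((101, 101))
    redges = get_bin_edges(dat, 50.0, 50.0, binsize=0.5)
    areas = np.pi * np.diff(redges ** 2)
    print('area spread (expect ~0):', areas.max() - areas.min())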
def locate_center(data_in, xguess=None, yguess=None, maxiter=25, binsize=0.1, plotit=False, block_center=False,
printit=False):
"""
    Finds the center of a ring pattern image by performing ringsums.
Args:
data_in (np.ndarray): pixel values for the camera image
xguess (float): guess of x location of center, if None, takes data center
yguess (float): guess of y location of center, if None, takes data center
maxiter (int): maximum number of center finding iterations
binsize (float): smallest radial binsize (occurs at the largest bin)
plotit (bool): plot fitting curves
block_center (bool): block center of image (600x600 on xguess,yguess) when finding center. This helps when
there is a ring at the center of the image.
printit (bool): print center find progress if True
Returns:
tuple (float, float): x and y location of the center
"""
if xguess is None:
xguess = data_in.shape[1] / 2.
if yguess is None:
yguess = data_in.shape[0] / 2.
if printit:
print(xguess, yguess)
if block_center:
data = np.copy(data_in)
data[int(yguess - 300):int(yguess + 301), int(xguess - 300):int(xguess + 301)] = 0.0
else:
data = data_in
#line1 = None
#line2 = None
#fig = None
#ax = None
#if plotit:
# plt.ion()
# fig, ax = plt.subplots()
#if printit:
# print("Center finding:")
# print("start x0: {0} y0: {1}".format(xguess, yguess))
for ii in range(maxiter):
binarr, ULsigarr, URsigarr, BLsigarr, BRsigarr = ringsum(data, xguess, yguess, binsize=binsize, quadrants=True)
ULsigarr /= ULsigarr.max()
URsigarr /= URsigarr.max()
BLsigarr /= BLsigarr.max()
BRsigarr /= BRsigarr.max()
left = ULsigarr / ULsigarr.max() + BLsigarr / BLsigarr.max()
right = URsigarr / URsigarr.max() + BRsigarr / BRsigarr.max()
rL = quick_gaussian_peak_finder(binarr, left)
rR = quick_gaussian_peak_finder(binarr, right)
up = ULsigarr / ULsigarr.max() + URsigarr / URsigarr.max()
down = BLsigarr / BLsigarr.max() + BRsigarr / BRsigarr.max()
rU = quick_gaussian_peak_finder(binarr, up)
rD = quick_gaussian_peak_finder(binarr, down)
dx = (rL-rR)/2.0
xguess -= dx #(rL - rR)/2.0
dy = (rU - rD)/2.0
yguess -= dy #(rU - rD)/2.0
if printit:
print("Delta x={0:.4f} Delta y={1:.4f}".format(-dx, -dy))
# thres = 0.3 * np.max(ULsigarr + URsigarr)
# i = np.where(ULsigarr + URsigarr > thres)[0]
# # A cheat if the i goes to the edge of the array. Makes sliding them past each other hard
# if len(ULsigarr) - i.max() < 60:
# i = i[0:-50]
# ni = len(i)
# ns = 25
# # ns = 13
# sarr = 2 * np.arange(-ns, ns + 1, 1)
# sarr_max = np.max(sarr)
# UB = np.zeros(len(sarr))
# RL = np.zeros(len(sarr))
# for idx, ix in enumerate(sarr):
# UB[idx] = np.sum((ULsigarr[i - ix] + URsigarr[i - ix] - BLsigarr[i] - BRsigarr[i]) ** 2) / (
# 1.0 * ni - np.abs(ix))
# RL[idx] = np.sum((URsigarr[i - ix] + BRsigarr[i - ix] - ULsigarr[i] - BLsigarr[i]) ** 2) / (
# 1.0 * ni - np.abs(ix))
# RLfit = np.polyfit(sarr, RL, 2)
# UBfit = np.polyfit(sarr, UB, 2)
# """
# The logic for the center jump is matching A(x-x0)^2 = C0 x^2 + C1 x + C2
# x0 = - C1 / (2 C0)
# """
# if RLfit[0] < 0.0:
# # Concave down fit
# RLcent = -2 * np.max(sarr) * np.sign(RLfit[1])
# else:
# # concave up
# RLcent = -RLfit[1] / (2 * RLfit[0])
# # Dont jump fartther than max(sarr)
# if np.abs(RLcent) > sarr_max:
# RLcent = np.sign(RLcent) * np.max(sarr)
# if UBfit[0] < 0.0:
# # concave down
# UBcent = -2 * np.max(sarr) * np.sign(UBfit[1])
# else:
# # concave up
# UBcent = -UBfit[1] / (2 * UBfit[0])
# # Dont jump fartther than max(sarr)
# if np.abs(UBcent) > sarr_max:
# UBcent = np.sign(UBcent) * np.max(sarr)
# if False: # ~np.isfinite(RLcent):
# xguess -= binsize
# else:
# xguess -= RLcent * binsize
# if False: # ~np.isfinite(UBcent):
# yguess += binsize
# else:
# yguess += UBcent * binsize
if printit:
print("{2:d}, update x0: {0}, y0: {1}".format(xguess, yguess, ii))
# if plotit:
# if line1 is not None:
# line1.set_data(sarr, UB)
# else:
# line1, = ax.plot(sarr, UB, 'r', lw=1, label='UD')
# if line2 is not None:
# line2.set_data(sarr, RL)
# else:
# line2, = ax.plot(sarr, RL, 'b', lw=1, label='RL')
# if ii == 0:
# ax.legend()
# fig.canvas.draw()
# plt.pause(1.0)
#if np.sqrt((UBcent * binsize) ** 2 + (RLcent * binsize) ** 2) / binsize < 0.1:
if np.sqrt(dx**2 + dy**2) < binsize * 0.2:
break
# if plotit:
# plt.close(fig)
# plt.ioff()
return xguess, yguess
@jit(nopython=True)
def calculate_weighted_mean(data, error):
"""Calculates the weighted mean of data with standard deviation error
Args:
data (np.ndarray): data array
error (np.ndarray): standard deviation error bar for data
Returns:
tuple (float, float): weighted mean and weighted standard deviation
"""
idx = np.where(error > 0.0)
err = error[idx]
d = data[idx]
weights = 1.0 / err ** 2
denominator = np.nansum(weights)
sigma = np.sqrt(1.0 / denominator)
numerator = np.nansum(d * weights)
mean = numerator / denominator
return mean, sigma
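# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Points with small error bars dominate the weighted mean.
def _demo_weighted_mean():
    data = np.array([1.0, 3.0])
    error = np.array([0.1, 1.0])
    mean, sigma = calculate_weighted_mean(data, error)
    print('weighted mean (expect ~1.02):', mean, '+/-', sigma)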
@jit(nopython=True)
def super_pixelate(data, npix=2):
"""Creates super pixels for image data
Args:
data (np.ndarray): 2d image data
npix (int, optional): integer number of pixels to create an npix x npix super pixel
Returns:
np.ndarray: New image made from the super pixels
"""
n, m = data.shape
n_new = n // npix
m_new = m // npix
d = np.zeros((n_new, m_new))
    for i in range(n_new):
        for j in range(m_new):
n_idx = slice(i * npix, (i + 1) * npix)
m_idx = slice(j * npix, (j + 1) * npix)
# d[i, j] = np.mean(data[n_idx, m_idx])
d[i, j] = np.sum(data[n_idx, m_idx])
return d
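# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A 4x4 image with npix=2 collapses to 2x2; each super pixel sums its block.
def _demo_super_pixelate():
    img = np.arange(16.0).reshape(4, 4)
    print(super_pixelate(img, 2))  # expect [[10, 18], [42, 50]]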
def ringsum(data, x0, y0, binsize=0.1, quadrants=False, use_weighted=False, remove_hot_pixels=False):
"""Returns a equal annulus area ringsum centered at (x0, y0) from data
Args:
data (np.ndarray): 2d image data
x0 (float): center location in x
y0 (float): center location in y
binsize (float, optional): the delta r of the last annulus, default=0.1
quadrants (bool): split the ringsum into 4 quadrants to use multiprocessing, default=False
use_weighted (bool): use a weighted mean, default=False
Returns:
tuple
"""
ny, nx = data.shape
x = np.arange(0, nx, 1)
y = np.arange(0, ny, 1)
xx, yy = np.meshgrid(1. * x - x0, 1. * y - y0)
R = np.sqrt(xx ** 2 + yy ** 2)
redges = get_bin_edges(data, x0, y0, binsize=binsize)
ri = int(redges[-1])
xi0 = int(x0)
yi0 = int(y0)
i1 = [yi0 - ri, yi0 - ri, yi0, yi0]
i2 = [yi0 + 1, yi0 + 1, yi0 + ri + 1, yi0 + ri + 1]
j1 = [xi0 - ri, xi0, xi0 - ri, xi0]
j2 = [xi0 + 1, xi0 + ri + 1, xi0 + 1, xi0 + ri + 1]
rarr = 0.5 * (redges[0:-1] + redges[1:])
if quadrants:
procs = []
nprocs = 4
sigs = {}
out = mp.Queue()
labels = ['UL', 'UR', 'BL', 'BR']
for k in range(nprocs):
p = mp.Process(target=_ringsum, args=(redges[1:],
R[i1[k]:i2[k], j1[k]:j2[k]], data[i1[k]:i2[k], j1[k]:j2[k]]),
kwargs={'out': out, 'label': labels[k], 'use_weighted': False, 'remove_hot_pixels':remove_hot_pixels})
procs.append(p)
p.start()
for i in range(nprocs):
tup = out.get()
sigs[tup[0]] = tup[1]
for p in procs:
p.join()
return rarr, sigs['UL'], sigs['UR'], sigs['BL'], sigs['BR']
else:
sig, sigma = _ringsum(redges[1:], R, data, out=None, label=None, use_weighted=False, remove_hot_pixels=remove_hot_pixels)
return rarr, sig, sigma
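# --- Illustrative usage sketch (added for clarity; the image below is synthetic) ---
def _demo_ringsum():
    yy, xx = np.mgrid[0:201, 0:201]
    rr = np.sqrt((xx - 100.0) ** 2 + (yy - 100.0) ** 2)
    img = np.exp(-0.5 * (rr - 60.0) ** 2 / 3.0 ** 2)  # one synthetic ring at r = 60
    rarr, sig, sigma = ringsum(img, 100.0, 100.0, binsize=0.5)
    return rarr, sig  # sig should peak near rarr = 60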
def _ringsum(redges, radii, data, out=None, label=None, use_weighted=False, remove_hot_pixels=False):
"""Helper function for ringsumming
Args:
redges (np.ndarray): bin edges (does not include origin)
radii (np.ndarray): radii to bin
data (np.ndarray): image weights for the radii to be binned with
out (mp.Queue, optional): multiprocessing queue to place results in if needed
label (list, optional): label to put with results when placing in out
use_weighted (bool): use weighted mean if True, default=False
Returns:
None if use_weighted
tuple (np.ndarray, np.ndarray): ring sum, ring sum standard deviations
"""
# redges does not include zero!
n = len(redges)
R = radii.flatten()
d = data.flatten()
indsort = np.argsort(R)
R = R[indsort]
d = d[indsort]
means = np.zeros(n)
sigmas = np.zeros(n)
lengths = np.zeros(n)
start = 0
for idx, edge in enumerate(redges):
iedge = np.searchsorted(R[start:], edge, side='right')
portion = slice(start, start + iedge)
if use_weighted:
means[idx], sigmas[idx] = calculate_weighted_mean(d[portion], np.sqrt(1.8 * d[portion]))
else:
means[idx] = np.mean(d[portion])
std = np.std(d[portion])
sigmas[idx] = std / np.sqrt(len(d[portion]))
if remove_hot_pixels:
good_pixels = np.where(np.abs(d[portion] - means[idx]) <= 3.0*std)
if use_weighted:
means[idx], sigmas[idx] = calculate_weighted_mean(d[portion][good_pixels], np.sqrt(1.8 * d[portion][good_pixels]))
else:
means[idx] = np.mean(d[portion][good_pixels])
# print(len(d[portion]), len(d[portion][good_pixels]))
if False:#idx == 700:
print(edge)
fig, ax = plt.subplots()
ax.hist(d[portion], bins='auto')
ax.axvline(means[idx]-3.0*std)
ax.axvline(means[idx]+3.0*std)
plt.show()
sigmas[idx] = np.std(d[portion][good_pixels]) / np.sqrt(len(d[portion][good_pixels]))
lengths[idx] = len(d[portion])
start += iedge
#print(len(d[portion]))
if out and label:
out.put((label, means, sigmas))
else:
return means, sigmas
```
#### File: fabry/core/synthetic_data.py
```python
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import trapz
import multiprocessing as mp
import h5py
def airy_func(wavelength, cos_th, d, F):
Q = (2. * F / np.pi)**2
airy = 1.0 / (1.0 + Q * np.sin(np.pi * 2.e6 * d * cos_th / wavelength)**2)
return airy
def doppler_calc(w0, mu, temp, v):
sigma = w0 * 3.2765e-5 * np.sqrt(temp / mu)
w = w0 * (1.0 - 3.336e-9 * v)
print(w, sigma, w0, mu, temp, v)
return sigma, w
def gaussian(wavelength, w, sigma, amp=1.):
norm = 1. / (sigma * np.sqrt(2.*np.pi))
exp = np.exp(-0.5 * (wavelength-w)**2 / sigma**2)
return amp * norm * exp
def lorentzian(wavelength, w, gamma, amp=1.):
A = (amp * 0.5 * gamma) / np.pi
return A / ((wavelength - w)**2 + (0.5 * gamma)**2)
def forward_model(r, L, d, F, w0, mu, amp, temp, v, nlambda=1024, sm_ang=False, nprocs=6):
sm_ang = False # you are never coming back!
if type(w0) in [list, tuple]:
if not all([type(x) in [list,tuple] for x in [mu, amp, temp, v]]):
raise ValueError('need to have a list for all spec params')
if not all([len(x) == len(w0) for x in [mu, amp, temp, v]]):
raise ValueError('spec params are not all the same length')
sigma = []
w = []
for i,ww in enumerate(w0):
s, l = doppler_calc(ww, mu[i], temp[i], v[i])
sigma.append(s)
w.append(l)
if nprocs > 1:
wavelength = np.linspace(min(w) - 10.*max(sigma), max(w) + 10.*max(sigma), nlambda)
else:
wavelength = np.linspace(min(w) - 10.*max(sigma), max(w) + 10.*max(sigma), nlambda)[:,np.newaxis]
spec = 0.0
for idx,ww in enumerate(w):
spec += gaussian(wavelength, ww, sigma[idx], amp[idx])
else:
if not all([type(x) not in [list,tuple] for x in [mu, amp, temp, v]]):
raise ValueError('need to have a list or not for all spec params')
sigma, w = doppler_calc(w0, mu, temp, v)
if nprocs > 1:
wavelength = np.linspace(w - 10.*sigma, w + 10.*sigma, nlambda)
else:
wavelength = np.linspace(w - 10.*sigma, w + 10.*sigma, nlambda)[:,np.newaxis]
spec = gaussian(wavelength, w, sigma, amp)
if sm_ang:
cos_th = 1.0 - 0.5 * (r/L)**2
else:
cos_th = L / np.sqrt(L**2 + r**2)
if nprocs > 1:
def par_func(cos_th, spec, wavelength, d, F, out=None, label=None):
model = np.zeros_like(cos_th)
for i, cth in enumerate(cos_th):
model[i] = trapz(spec*airy_func(wavelength, cth, d, F), wavelength, axis=0)
model[i] *= 1000
model[i] += np.random.normal(loc=0.0, scale=np.sqrt(model[i]), size=1)
if out and label:
out.put((label, model))
else:
return model
cos_ths = np.array_split(cos_th, nprocs)
procs = []
sigs = {}
out = mp.Queue()
labels = ['{0}'.format(x) for x in range(nprocs)]
for k in range(nprocs):
p = mp.Process(target=par_func, args=(cos_ths[k], spec, wavelength, d, F),
kwargs={'out':out, 'label': labels[k]})
procs.append(p)
p.start()
for i in range(nprocs):
tup = out.get()
sigs[tup[0]] = tup[1]
for p in procs:
p.join()
model = []
for k in labels:
model.append(sigs[k])
model = np.concatenate(model)
else:
model = trapz(spec*airy_func(wavelength, cos_th, d, F), wavelength, axis=0)
return model
def ccd(npx=(4016, 6016),px_size=0.004):
cntr = [(x-1)/2. for x in npx]
return px_size * np.fromfunction(lambda i, j: np.sqrt((i-cntr[0])**2 + (j-cntr[1])**2), npx, dtype=float)
def ccd_quad(npx=(4016, 6016), px_size=0.004):
end = (int(npx[0]/2.), int(npx[1]/2.))
return px_size * np.fromfunction(lambda i,j: np.sqrt((i+0.5)**2+(j+0.5)**2), end, dtype=float)
def recomb_quad(a):
b = np.vstack((a[::-1,:],a))
return np.hstack((b[:,::-1],b))
def full_pattern(L,d,F,w0,mu,T,V,A=1.,sm_ang=False,nprocs=6,plotit=False,saveit=None):
'''
    produces full synthetic ring pattern for Nikon d5200/5300 camera
Inputs:
L (float): camera lens focal length (in mm)
d (float): etalon spacing (in mm)
F (float): etalon finesse
w0 (float or list of floats): wavelengths of spectrum (in nm)
mu (float or list of floats): atomic mass of elements used, same
order as w0 list (in a.m.u.)
        T (float or list of floats): ion temperature (in eV)
        V (float or list of floats): flow velocity of spectrum (in m/s,
            passed unscaled to doppler_calc)
A (float or list of floats): amplitudes of spectrum, default is 1
sm_ang (bool, default=F): flag to use the small angle approximation,
for true synthetic data this should be False
nprocs (int, default=6): number of processors to use for calc.
plotit (bool, default=F): flag to plot the resulting rings
saveit (str, default=None): hdf5 filename for optional saved rings,
if left to None, the data will not be saved
Outputs:
        rings (np.ndarray): output of the forward model
'''
if type(w0) is list and type(A) is float:
A = [A]*len(w0)
a = ccd_quad()
rings = forward_model(a.flatten(),L,d,F,w0,mu,A,T,V, nprocs=nprocs)
print('done with first')
rings = rings.reshape(a.shape)
rings = recomb_quad(rings)
r = np.arange(0., (np.sqrt(rings.shape[0]**2 + rings.shape[1]**2)/2.) + 0.0005, 0.001)
ringsum = forward_model(r*0.004, L, d, F, w0, mu, A, T, V, nprocs=nprocs)
if saveit is not None:
with h5py.File(saveit,'w') as hf:
hf.create_dataset('2Ddata',data=rings,compression='lzf')
hf.create_dataset('1Ddata',data=ringsum,compression='lzf')
hf.create_dataset('1Dr',data=r,compression='lzf')
hf.create_dataset('L',data=L)
hf.create_dataset('d',data=d)
hf.create_dataset('F',data=F)
hf.create_dataset('amp',data=A)
hf.create_dataset('w0',data=w0)
hf.create_dataset('mu',data=mu)
hf.create_dataset('temp',data=T)
hf.create_dataset('vel',data=V)
if plotit:
f,axs = plt.subplots(figsize=(10,7))
axs.imshow(rings, cmap='Greys_r', interpolation='nearest', vmin=0, origin='lower')
axs.set_aspect('equal')
plt.show(block=False)
f,axs = plt.subplots(figsize=(10,7))
axs.plot(r,ringsum,lw=2)
axs.set_xlabel('R (px)')
plt.show()
return rings
```
#### File: fabry/core/synthetic.py
```python
from __future__ import print_function, division, absolute_import
import numpy as np
from . import models
import multiprocessing as multi
try:
    from collections.abc import MutableMapping  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import MutableMapping
from ..plasma import plasma
########################################
# Physical constants, DO NOT OVERWRITE #
########################################
q = 1.6e-19
c = 2.998e8
mp = 1.672e-27
class Sensor(object):
"""A representation for a camera sensor for the Fabry Perot
Attributes:
nx (int): number of pixels in the x direction
ny (int): number of pixels in the y direction
px_size (float): Pixel size in mm
x0 (float): x location of the center
        y0 (float): y location of the center
sensor (np.ndarray): 2D array representing the sensors where each
element is the count value for that pixel
"""
def __init__(self, nx, ny, px_size=0.004):
super(Sensor, self).__init__()
self.nx = int(nx)
self.ny = int(ny)
self.sensor = np.zeros((nx, ny))
self.px_size = px_size
self.x0 = nx / 2.0
self.y0 = ny / 2.0
self._R = None # Delay the creation of the R matrix until it is actually needed
def create_Rgrid(self):
"""Creates a 2D matrix of radii values for each pixel
returns:
np.ndarray
"""
nx = self.nx
ny = self.ny
x = np.arange(1, nx + 1, 1)
y = np.arange(1, ny + 1, 1)
XX, YY = np.meshgrid(x, y)
R = np.sqrt((XX - self.x0) ** 2 + (YY - self.y0) ** 2)
return R
@property
def R(self):
"""np.ndarray: 2D array of radii values for each pixel"""
if self._R is None:
self._R = self.create_Rgrid()
return self._R
# @R.setter
# def R(self):
# self._R = self.create_Rgrid()
def calculate_emission(self, etalon, light_source, nprocs=4):
"""Calculates emission from a light source through an etalon onto the sensor
Args:
etalon (Etalon): representation of the etalon
light_source (LightSource): representation of the light source
"""
self.sensor = etalon.calculate_emission(self, light_source, nprocs=nprocs)
@classmethod
def from_dict(cls, sensor):
"""Creates an instance of Sensor from a dictionary
Args:
sensor (dict): dictionary representing a sensor
Returns:
Sensor: a new instance of a Sensor
"""
nx = sensor.get('nx', 1024)
ny = sensor.get('ny', 1024)
px_size = sensor.get('px_size', 0.004)
return cls(nx, ny, px_size=px_size)
def to_dict(self):
"""Returns a dictionary representation of a Sensor
Returns:
dict: a dictionary representation of a Sensor
"""
return {'nx': self.nx, 'ny': self.ny, 'px_size': self.px_size}
def __repr__(self):
class_name = type(self).__name__
return '{}({!r}, {!r}, px_size={!r})'.format(class_name, self.nx, self.ny, self.px_size)
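# --- Illustrative sketch (added for clarity; not part of the original module) ---
# to_dict()/from_dict() are intended to round-trip a Sensor configuration.
def _demo_sensor_roundtrip():
    s = Sensor(1024, 1024)
    s2 = Sensor.from_dict(s.to_dict())
    assert s.to_dict() == s2.to_dict()  # same nx, ny, px_size; fresh pixel array
    return s2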
class Etalon(object):
"""
Class that represents an etalon for a Fabry-Perot spectrometer
Attributes:
L (float): focal length of lens for the camera
d (float): etalon spacing
F (float): finesse of etalon
"""
def __init__(self, L, d, F):
super(Etalon, self).__init__()
self.L = L
self.d = d
self.F = F
def calculate_emission(self, sensor, light_source, nprocs=4):
"""Calcultes the emission onto a sensor from a light source
Note: This uses multiprocessing for speed. Each process has a for loop
because of memory constraints if the sensor is too big.
Args:
sensor (Sensor): represents a camera sensor
light_source (LightSource): represents a light source for the Fabry Perot
nprocs (int): number of processes to use
Returns:
np.ndarray: shape matches sensor.sensor.shape
"""
r = sensor.R
px_size = sensor.px_size
w = light_source.wavelength
mu = light_source.mu
amp = light_source.amplitude
vel = light_source.velocity
temp = light_source.temperature
split_r = np.array_split(r.flatten(), nprocs)
procs = []
sigs = {}
out = multi.Queue()
labels = ['{0}'.format(x) for x in range(nprocs)]
for k in range(nprocs):
p = multi.Process(target=Etalon._calculate_emission,
args=(split_r[k], self.L / px_size, self.d, self.F, w, mu, amp, temp, vel),
kwargs={'out': out, 'label': labels[k]})
procs.append(p)
p.start()
for i in range(nprocs):
tup = out.get()
sigs[tup[0]] = tup[1]
for p in procs:
p.join()
emission = []
for k in labels:
emission.append(sigs[k])
emission = np.concatenate(emission)
emission.shape = r.shape
return emission
@staticmethod
def _calculate_emission(r, L, d, F, w, mu, amp, temp, vel, out=None, label=None, noise=True):
"""Helper function for calculating emission
        Note: This is utilized by calculate_emission for multiprocessing
Args:
r (np.ndarray): radii in pixels
L (float): focal length of camera lens in pixels
d (float): etalon spacing in mm
F (float): finesse of etalon
w (float): wavelength in nm
mu (float): relative mass
amp (float): amplitude of line
temp (float): ion temperature in eV
vel (float): velocity of ion in m/s
out (multiprocessing.Queue): output queue
label (str): label for the output being put into the output queue
Returns:
np.ndarray (optional) only if not using for multiprocessing
"""
# Memory is a problem here because of broadcasting so I'm going to split the problem up
model = []
r_split = np.array_split(r.flatten(), 1000)
for r_sp in r_split:
model.append(models.forward_model(r_sp, L, d, F, w, mu, amp, temp, vel))
model = np.concatenate(model)
if noise:
print(label, 'adding noise to the image')
npts = len(model)
            for i in range(npts):
model[i] = model[i] + np.random.normal(scale=np.sqrt(model[i]))
if i % 100 == 0:
print(model[i], np.random.normal(scale=np.sqrt(model[i])))
if out and label:
print(label)
out.put((label, model))
else:
return model
def __repr__(self):
class_name = type(self).__name__
return "{}({!r}, {!r}, {!r})".format(class_name, self.L, self.d, self.F)
@classmethod
def from_dict(cls, etalon):
"""Creates an instance of Etalon from a dictionary
Args:
etalon (dict): dictionary representing a etalon
Returns:
            Etalon: a new instance of an Etalon
"""
L = etalon.get('L', 150.0)
d = etalon.get('d', 0.88)
F = etalon.get('F', 21.0)
return cls(L, d, F)
def to_dict(self):
"""Returns a dictionary representing itself
Returns:
dict: a dictionary representing itself
"""
return {"L": self.L, "d": self.d, "F": self.F}
class LightSource(object):
"""A representation of a light source for a Fabry-Perot spectrometer
Attributes:
temperature (float): temperature of the emitting ion in eV
wavelength (float): wavelength of the light emitted in nm
mu (float): relative mass of the ion
amplitude (float): amplitude of the light emitted (you can choose your units here...)
velocity (VelocityProfile or float): velocity of the emitting ion in m/s
"""
def __init__(self, Ti, w, mu, velocity, amplitude=1):
super(LightSource, self).__init__()
self.temperature = Ti
self.wavelength = w
self.mu = mu
self.amplitude = amplitude
self.velocity = velocity
def __repr__(self):
class_name = type(self).__name__
return "{}({!r}, {!r}, {!r}, {!r}, amplitude={!r})".format(
class_name, self.temperature, self.wavelength, self.mu,
self.velocity, self.amplitude)
@classmethod
def from_dict(cls, light_source):
"""Creates a new instance of LightSource from a dictionary
Args:
light_source (dict): dictionary representing a light source
Returns:
LightSource
"""
temperature = light_source.get('temperature', 0.5)
wavelength = light_source.get('wavelength', 488.0)
mu = light_source.get('mu', 40.0)
amplitude = light_source.get('amplitude', 1.0)
# Oomph, not sure I like this, but I couldn't think of a better way
velocity = light_source.get('velocity', 0.0)
if isinstance(velocity, MutableMapping):
vel_class = velocity.get("class_name", VelocityProfile)
vel_class = globals().get(vel_class, None)
velocity.pop('class_name')
if vel_class is None:
velocity = 0.0
else:
velocity = vel_class(**velocity)
return cls(temperature, wavelength, mu, velocity, amplitude=amplitude)
def to_dict(self):
"""Returns a dict representing itself"""
velocity = 0.0
if self.velocity is not None:
velocity = self.velocity
try:
velocity = velocity.to_dict()
except AttributeError:
pass
return {
'temperature': self.temperature,
'wavelength': self.wavelength,
'mu': self.mu,
'amplitude': self.amplitude,
'velocity': velocity,
}
class UniformPlasma(LightSource):
"""A representation of a uniform density and uniform Ti plasma with ion
species mu emitting light for a Fabry-Perot spectrometer
Attributes:
mu (float): relative mass of the ion
velocity (Union[VelocityProfile,float]): velocity of the emitting ion in m/s
ne (float): electron density in cm^-3
pec (float): photon emissivity coefficient (need to decide on units here)
mu (float): relative mass of the ion
"""
def __init__(self, ne, Ti, pec, w, velocity=None, mu=40.0):
super(UniformPlasma, self).__init__(Ti, w, mu, velocity)
self.ne = ne
self.pec = pec
self.mu = mu
def ion_emission(self, r, wavelength, cos_theta=None):
"""Calculates ion emission at a radius r for wavelengths provided
Args:
r (Union[float, np.ndarray]): radii to calculate ion emission at
wavelength (Union[float, np.ndarray]): wavelength to calculate emission line
profile
cos_theta (Union[float, np.ndarray]): cos(theta) to project velocity onto a unit
vector an angle theta from the toroidal direction
Returns:
np.ndarray
"""
velocity = 0.0
if self.velocity is not None:
velocity = self.velocity
if callable(velocity):
velocity = velocity(r)
if cos_theta is not None:
print('im in the cos theta portion')
cosine = np.asarray(cos_theta)
print(cosine.max(), cosine.min())
velocity *= cosine
line_profile = self.gaussian(wavelength, velocity)
emission = self.ne ** 2 * self.pec * line_profile / (4 * np.pi)
return emission
def chord_emission(self, impact_factor, wavelength):
b = np.asarray(impact_factor)
        if np.min(b) < 0.0:  # every impact factor must be non-negative
raise ValueError('impact_factor must be greater than or equal to zero')
max_radii = 150.0
x_max = np.sqrt(max_radii ** 2 - b ** 2)
x_arr = np.linspace(0.0, x_max, 1000)
# I need the x_arr and subsequent arrays to be broadcastable with wavelength array
x_arr = x_arr[np.newaxis, :]
w = wavelength[:, np.newaxis]
# theta_arr = np.arctan2(b, x_arr)
cos_theta = b / np.sqrt(b ** 2 + x_arr ** 2)
rarr = np.sqrt(x_arr ** 2 + b ** 2)
print(rarr.shape)
print(w.shape)
emission = self.ion_emission(rarr, w, cos_theta=cos_theta)
radiance = 2.0 * np.trapz(emission, x=x_arr, axis=1)
print(emission.shape, x_arr.shape, w.shape, radiance.shape)
# fig, ax = plt.subplots()
# for i in range(1000):
# if i % 50 == 0:
# ax.plot(wavelength, emission[:, i] / emission.max())
# ax.plot(wavelength, radiance / radiance.max(), 'k')
# plt.show()
return radiance
def gaussian(self, wavelength, velocity):
"""Calculates doppler broadened and shifted gaussian
Args:
wavelength (Union[float, np.ndarray]): wavelength to calculate emission line profile
velocity (Union[float, np.ndarray]): velocity of ion for doppler shift
"""
w = np.asarray(wavelength)
v = np.asarray(velocity)
sigma = self.sigma
w_shift = self.calculate_shift(v)
norm = np.sqrt(2 * np.pi) * sigma
return np.exp(-0.5 * (w - w_shift) ** 2 / sigma ** 2) / norm
@property
def sigma(self):
"""Thermal doppler broadening"""
return np.sqrt(q * self.temperature / self.mass) * self.wavelength / c
def calculate_shift(self, velocity):
"""Calculate doppler shift from the ion velocity
Args:
velocity (Union[float, np.ndarray]): velocity in m/s
Returns:
np.ndarray
"""
return self.wavelength * (1.0 - velocity / c)
@property
def mass(self):
"""Mass of ion in kg"""
return self.mu * mp
def __repr__(self):
class_name = type(self).__name__
return "{}({!r}, {!r}, {!r}, {!r}, velocity={!r}, mu={!r})".format(
class_name, self.ne, self.temperature, self.pec, self.wavelength,
self.velocity, self.mu)
def to_dict(self):
"""Returns a dict representation
"""
velocity = 0.0
if self.velocity is not None:
velocity = self.velocity
try:
velocity = velocity.to_dict()
except AttributeError:
pass
return {
'temperature': self.temperature,
'wavelength': self.wavelength,
'mu': self.mu,
'velocity': velocity,
'pec': self.pec,
'ne': self.ne
}
@classmethod
def from_dict(cls, plasma):
"""Creates a new instance of UniformPlasma from dict
Args:
plasma (dict): dictionary representation of a UniformPlasma
Returns:
UniformPlasma
"""
temperature = plasma.get('temperature', 0.5)
wavelength = plasma.get('wavelength', 488.0)
mu = plasma.get('mu', 40.0)
pec = plasma.get('pec')
ne = plasma.get("ne", 1e12)
# Oomph, not sure I like this, but I couldn't think of a better way
velocity = plasma.get('velocity', 0.0)
if isinstance(velocity, MutableMapping):
vel_class = velocity.get("class_name", VelocityProfile)
vel_class = globals().get(vel_class, None)
velocity.pop('class_name')
if vel_class is None:
velocity = 0.0
else:
velocity = vel_class(**velocity)
return cls(ne, temperature, pec, wavelength, velocity=velocity, mu=mu)
class VelocityProfile(object):
"""Represents a edge driven velocity profile
Attributes:
Vmax (float): maximum velocity
max_radius (float): radial location of the maximum velocity
length_scale (float): scale for the velocity gradient inward radially
edge_scale (float): scale for the velocity edge gradient
R0 (float): location of the edge
offset (float): offset velocity in the center
"""
def __init__(self, Vmax, max_radius, length_scale, edge_scale, R0=140.0, offset=0.0):
super(VelocityProfile, self).__init__()
self.Vmax = Vmax
self.max_radius = max_radius
self.length_scale = length_scale
self.edge_scale = edge_scale
self.R0 = R0
self.offset = offset
def vphi(self, r):
"""Returns the Torodial velocity at r
Args:
r (Union[float, np.ndarray]): radii to evaluate vphi at
Returns:
np.ndarray
"""
radii = np.asarray(r)
right_profile = self.right_edge_profile(radii)
left_profile = self.gaussian(radii)
vel = right_profile * left_profile
vel *= self.Vmax / np.max(vel)
return vel
def right_edge_profile(self, r):
"""Helper function for the edge profile
Args:
            r (Union[float, np.ndarray]): radii to evaluate at
Returns:
np.ndarray
"""
return 0.5 * (1.0 - np.tanh((r - self.R0) / self.edge_scale))
def gaussian(self, r):
"""Helper function for the inward velocity gradient
Args:
            r (Union[float, np.ndarray]): radii to evaluate at
Returns:
np.ndarray
"""
g = (1 - self.offset / self.Vmax) * np.exp(-(r - self.max_radius) ** 2 / self.length_scale ** 2)
g += self.offset / self.Vmax
# print(self.offset / self.Vmax, 1 + self.offset / self.Vmax)
# fig, ax = plt.subplots()
# ax.plot(r, g)
# plt.show()
return g
def __call__(self, r):
return self.vphi(r)
def __repr__(self):
cls = type(self).__name__
s = "{}({!r},{!r},{!r},{!r},R0={!r},offset={!r})".format(cls, self.Vmax, self.max_radius, self.length_scale,
self.edge_scale, self.R0, self.offset)
return s
def to_dict(self):
"""Returns a dict representation of the VelocityProfile
Returns:
dict
"""
output_dict = {'class_name': type(self).__name__,
'Vmax': self.Vmax,
'max_radius': self.max_radius,
'length_scale': self.length_scale,
'edge_scale': self.edge_scale,
'R0': self.R0,
'offset': self.offset,
}
return output_dict
@classmethod
def from_dict(cls, velocity):
"""Returns a new instance of VelocityProfile from a dict
Args:
            velocity (dict): dict representation of a VelocityProfile
Returns:
VelocityProfile
"""
Vmax = velocity.get('Vmax')
max_radius = velocity.get('max_radius')
length_scale = velocity.get('length_scale')
edge_scale = velocity.get('edge_scale')
R0 = velocity.get('R0', 140.0)
offset = velocity.get('offset', 0.0)
return cls(Vmax, max_radius, length_scale, edge_scale, R0=R0, offset=offset)
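# --- Illustrative sketch (added for clarity; profile parameters are made up) ---
# vphi rescales the profile shape so its maximum is exactly Vmax.
def _demo_velocity_profile():
    vprof = VelocityProfile(10000.0, 120.0, 30.0, 5.0, R0=140.0, offset=1000.0)
    r = np.linspace(0.0, 160.0, 161)
    v = vprof(r)  # __call__ dispatches to vphi
    print('max velocity (expect 10000.0):', v.max())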
# def PCX_Plasma(LightSource):
#
# def __init__(self, Ti, w, mu, velocity_outer, R_outer, Lnu, impact_factor):
# super(PCX_Plasma, self).__init__(Ti, w, mu, velocity_outer)
# self.impact_factor = impact_factor
# r, _, _ = plasma.calculate_r_theta_x_from_impact_factor(self.impact_factor, rmax=40, npts=500)
#
# self.velocity =
```
#### File: fabry/plasma/plasma.py
```python
from __future__ import division, print_function
import numpy as np
from scipy import special
from ..core import models
from functools import partial
try:
import matplotlib.pyplot as plt
except ImportError:
pass
cx_fits = {40: [0.39004112, -34.24186523],
4: [0.40712338, -33.82360615],
}
def pcx_couette_velocity_profile(r, mom_dif_length, R_inner, R_outer, V_inner, V_outer):
"""Calculates the torodial velocity profile for PCX
Args:
r (Union[np.ndarray, float]): radii to calculate profile on
mom_dif_length (float): momentum diffusion length scale
R_inner (float): Radius of inner boundary
R_outer (float): Radius of outer boundary
V_inner (float): Velocity at inner boundary
V_outer (float): Velocity at outer boundary
Returns:
        np.ndarray: toroidal velocity profile as a function of r
"""
x = np.asarray(r) / mom_dif_length
xi = R_inner / mom_dif_length
xo = R_outer / mom_dif_length
Iv = partial(special.iv, 1)
Kv = partial(special.kv, 1)
denom = Iv(xi) * Kv(xo) - Iv(xo) * Kv(xi)
A = Kv(xo) * V_inner - Kv(xi) * V_outer
B = Iv(xi) * V_outer - Iv(xo) * V_inner
A /= denom
B /= denom
return A * special.iv(1, x) + B * special.kv(1, x)
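# --- Illustrative sketch (added for clarity; parameter values are made up) ---
# The couette solution matches the imposed boundary velocities exactly at
# R_inner and R_outer, which is a quick sanity check on the Bessel algebra.
def _demo_couette_boundaries():
    r = np.array([5.0, 35.0])
    v = pcx_couette_velocity_profile(r, 20.0, 5.0, 35.0, -1000.0, 4000.0)
    print('boundary velocities (expect ~[-1000, 4000]):', v)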
def pcx_velocity_profile(r, mom_dif_length, R_outer, V_outer):
"""Calculates the toroidal velocity profile with no inner boundary for PCX
Args:
        r (Union[np.ndarray, float]): radii to calculate profile on
        mom_dif_length (float): momentum diffusion length scale
        R_outer (float): radius of the outer boundary
V_outer (float): Velocity at outer boundary
Returns:
        np.ndarray: toroidal velocity profile as a function of r
"""
x = np.asarray(r) / mom_dif_length
xo = R_outer / mom_dif_length
Iv = partial(special.iv, 1)
vel = V_outer * Iv(x) / Iv(xo)
if isinstance(r, np.ndarray):
if any(rr > R_outer for rr in r):
idx = np.where(r > R_outer)
vel[idx] = V_outer * np.exp(-(r[idx] - R_outer) ** 2 / 4.0 ** 2)
else:
if r > R_outer:
return V_outer * np.exp(-(r - R_outer) ** 2 / 4.0 ** 2)
return vel
def density_profile(r, r_edge, gradient_length_scale):
"""Calculates the electron density profile
Args:
r (Union[np.ndarray, float]): radii to calculate profile on
r_edge (float): edge of the electron density profile
gradient_length_scale (float): length scale of the gradient at r_edge
Returns:
np.ndarray: electron density profile as a function of r
"""
x = np.asarray(r)
return 0.5 * (1.0 - np.tanh((x - r_edge) / gradient_length_scale))
def calculate_r_theta_x_from_impact_factor(impact_factor, rmax=150.0, npts=101):
"""Calculates the radius array, theta array, and the distance along a chord at the specified impact factor
Args:
impact_factor (float): impact factor of the chord
rmax (float): max radius to include in chord
npts (int): number of points to use
Returns:
tuple: (np.ndarray, np.ndarray, np.ndarray) r, theta, x
"""
xmax = np.sqrt(rmax ** 2 - impact_factor ** 2)
x = np.linspace(-1, 1, npts) * xmax
r = np.sqrt(x ** 2 + impact_factor ** 2)
theta = np.arctan2(x, impact_factor)
return r, theta, x
def calculate_line_profile(wavelength, w0, Ti, vel, theta, mu):
"""Calculates the Gaussian line shape for a given set of parameters
Args:
wavelength (np.ndarray): wavelength array
w0 (float): central wavelength
Ti (float): temperature of emitting species
vel (float): toroidal velocity in m/s
        theta (float): angle of toroidal velocity to line of sight
mu (float): relative mass in amu
Returns:
np.ndarray: gaussian line shape
"""
vel_dot = vel * np.cos(theta)
w_shift = models.doppler_shift(w0, vel_dot)
sigma = models.doppler_broadening(w0, mu, Ti)
return models.gaussian(wavelength, w_shift, sigma, norm=False)
def calculate_pcx_chord_emission(impact_factor, Ti, w0, mu, Lnu, Vouter, rmax=40.0, nr=101, nlambda=2000,
Lne=2.5, R_outer=35):
"""Calculates PCX emission with only the outer boundary spinning for a given impact factor
Args:
impact_factor (float): impact factor for chord
Ti (float): ion temperature in eV
w0 (float): central wavelength
mu (float): mass in amu
Lnu (float): momentum diffusion length
Vouter (float): velocity in m/s for outer boundary
rmax (float): end of the plasma
nr (int): number of radial points to integrate chord with
nlambda (int): number of wavelength points
Lne (float): density gradient scale length at rmax
        R_outer (float): radius of the spinning outer boundary
Returns:
tuple: (np.ndarray, np.ndarray) wavelength and spectrum
"""
r, theta, x = calculate_r_theta_x_from_impact_factor(impact_factor, rmax=rmax, npts=nr)
vel = pcx_velocity_profile(r, Lnu, R_outer, Vouter)
# fig, ax = plt.subplots()
# ax.plot(r, vel)
# plt.show()
vel_adjusted = vel * np.cos(theta)
# ToDo: Should really iterate over w0 to handle the He II complex
w_shifted_max = models.doppler_shift(w0, np.max(vel_adjusted))
sigma = models.doppler_broadening(w_shifted_max, mu, Ti)
wavelength = np.linspace(-1, 1, nlambda) * 10.0 * sigma + w_shifted_max
# Now to build a big spectrum matrix
w_shifts = models.doppler_shift(w0, vel_adjusted)
full_spectrum = models.gaussian(wavelength[np.newaxis, :], w_shifts[:, np.newaxis], sigma, amp=1.0, norm=False)
# fig, ax = plt.subplots()
# ax.plot(vel_adjusted, w_shifts)
# plt.show()
dens = density_profile(r, rmax, Lne)
dens = dens[:, np.newaxis]
full_spectrum *= dens ** 2
# fig, ax = plt.subplots()
# for idx, spec in enumerate(full_spectrum):
# ax.plot(wavelength, spec, 'C0')
# ax.axvline(w_shifts[idx], color='C1')
# plt.show()
# print(full_spectrum.shape)
spectrum = np.trapz(full_spectrum, x=x, axis=0)
# print(spectrum.shape)
# fig, ax = plt.subplots()
# ax.plot(wavelength, spectrum / spectrum.max(), 'C1')
# plt.show()
return wavelength, spectrum
def charge_exchange_rate(Ti, mu=40, noise=False):
mass = int(mu)
logTi = np.log(Ti)
cx = np.polyval(cx_fits[mass], logTi)
cx = np.exp(cx)
if noise:
cx = np.random.normal(loc=cx, scale=0.1*cx, size=1)
return cx
def Lnu(ne_n0, Ti, mu=40, noise=False):
sigv_cx = charge_exchange_rate(Ti, mu=mu, noise=noise)
Lnu = np.sqrt(128 * 1e18 * Ti / (np.sqrt(mu) * ne_n0 * sigv_cx))
return Lnu
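# --- Illustrative usage sketch (added for clarity; plasma parameters are made up) ---
def _demo_Lnu():
    # argon (mu=40), Ti = 0.5 eV, ne/n0 = 10; units follow the 1e18 scaling above
    print('momentum diffusion length:', Lnu(10.0, 0.5, mu=40))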
```
#### File: fabry/tools/file_io.py
```python
from __future__ import print_function, division
import os
import numpy as np
import h5py
def dict_2_h5(fname, dic, append=False):
    '''Writes a dictionary to an hdf5 file with given filename
It will use lzf compression for all numpy arrays
Args:
fname (str): filename to write to
dic (dict): dictionary to write
append (bool): if true, will append to file instead of overwriting, default=False
'''
if append:
method = 'r+'
else:
method = 'w'
with h5py.File(fname, method) as h5:
recursive_save_dict_to_h5(h5, '/', dic)
def h5_2_dict(fname):
    '''Reads a dictionary from an hdf5 file with given filename
Args:
fname (str): hdf5 filename to read
Returns:
dict: dictionary of hdf5 keys
'''
with h5py.File(fname, 'r') as h5:
return recursive_load_dict_from_h5(h5, '/')
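# --- Illustrative sketch (added for clarity; 'demo.h5' is a throwaway file name) ---
def _demo_h5_roundtrip():
    original = {'scalar': 3.14, 'array': np.arange(5), 'nested': {'flag': 1}}
    dict_2_h5('demo.h5', original)
    restored = h5_2_dict('demo.h5')
    print(sorted(restored.keys()))  # expect ['array', 'nested', 'scalar']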
def prep_folder(path):
'''Checks if folder exists and recursively creates folders
to ensure the path is valid
Args:
path (str): path to folder
'''
if os.path.isdir(path):
return
else:
os.makedirs(path)
def recursive_save_dict_to_h5(h5, path, dic):
    ''' function used by dict_2_h5 in order to get recursion
'''
for key, item in dic.items():
if path + key in h5: ### overwrites pre-existing keys with same name
del h5[path + key]
        if isinstance(item, (np.ndarray, np.generic)):  # also catches numpy scalar subclasses
h5.create_dataset(path + key, data=item, compression='lzf')
elif type(item) != dict:
try:
h5.create_dataset(path + key, data=item)
except TypeError:
raise ValueError('Cannot save %s type' % type(item))
else:
recursive_save_dict_to_h5(h5, path + key + '/', item)
def recursive_load_dict_from_h5(h5, path):
    ''' function used by h5_2_dict in order to get recursion
'''
out_dict = {}
for key, item in h5[path].items():
# if type(item) == h5py._hl.dataset.Dataset:
if isinstance(item, h5py.Dataset):
            out_dict[key] = item[()]  # works across h5py versions; .value was removed in h5py 3.x
# elif type(item) == h5py._hl.group.Group:
elif isinstance(item, h5py.Group):
out_dict[key] = recursive_load_dict_from_h5(h5, path + key + '/')
return out_dict
def read_Ld_results(Ld_directory):
'''Reads L and d histogram data from multinest run
Args:
Ld_directory (str): path to multinest save directory
Returns:
Tuple (np.ndarray, np.ndarray) L histogram values (in pixels), d histogram values (in mm)
'''
try:
fname = os.path.join(Ld_directory, "Ld_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
except IOError:
fname = os.path.join(Ld_directory, "Ld_solver_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
L = post[:, 0]
d = post[:, 1]
return L, d
def read_match_finesse_results(finesse_directory, errtemp=False):
fname = os.path.join(finesse_directory, "F_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
F = post[:, 0]
V = post[:, 1]
T = post[:, 2]
if errtemp:
E = post[:, 3]
return F, V, T, E
else:
return F, V, T
def read_finesse_results(finesse_directory):
fname = os.path.join(finesse_directory, "finesse_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
F = post[:, 0]
A = post[:, 1]
Arel = post[:, 2]
Ti = post[:, 3]
return F, A, Arel, Ti
def read_lyon_temp_results(temp_directory):
fname = os.path.join(temp_directory, 'temp_post_equal_weights.dat')
post = np.loadtxt(fname, ndmin=2)
T = post[:, 0]
V = post[:, 1]
# A = post[:,2]
# O = post[:,3]
return T, V # ,A#,O
```
#### File: fabry/tools/helpers.py
```python
from __future__ import print_function, division
import numpy as np
def bin_data(data, npts=100):
n, m = divmod(len(data), npts)
if m != 0:
d = np.pad(data, pad_width=(0, npts-m), mode='constant', constant_values=np.nan)
d = np.reshape(d, (n+1, npts))
else:
d = data.copy()
d = np.reshape(d, (n, npts))
return np.nanmean(d, axis=1), np.nanstd(d, axis=1)
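# --- Illustrative sketch (added for clarity; not part of the original module) ---
# 250 samples with npts=100 give 3 bins; the short last bin is padded with
# NaNs, which nanmean/nanstd simply ignore.
def _demo_bin_data():
    means, stds = bin_data(np.arange(250.0), npts=100)
    print(means)  # expect [ 49.5  149.5  224.5]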
``` |
{
"source": "JmilkFan/openstackclient-api-demo",
"score": 2
} |
#### File: JmilkFan/openstackclient-api-demo/openstack_clients.py
```python
from openstackclient.identity.client import identity_client_v2
from keystoneclient import session as identity_session
import glanceclient
import novaclient.client as novaclient
import cinderclient.client as cinderclient
# FIXME(JmilkFan): Using oslo_config
NOVA_CLI_VER = 2
GLANCE_CLI_VER = 2
CINDER_CLI_VER = 2
class OpenstackClients(object):
"""Clients generator of openstack."""
def __init__(self, auth_url, username, password, tenant_name):
### Identity authentication via keystone v2
# An authentication plugin to authenticate the session with.
auth = identity_client_v2.v2_auth.Password(
auth_url=auth_url,
username=username,
password=password,
tenant_name=tenant_name)
try:
self.session = identity_session.Session(auth=auth)
except Exception as err:
raise
# Return a token as provided by the auth plugin.
self.token = self.session.get_token()
def get_glance_client(self, interface='public'):
"""Get the glance-client object."""
# Get an endpoint as provided by the auth plugin.
glance_endpoint = self.session.get_endpoint(service_type="image",
interface=interface)
# Client for the OpenStack Images API.
glance_client = glanceclient.Client(GLANCE_CLI_VER,
endpoint=glance_endpoint,
token=self.token)
return glance_client
def get_nova_client(self):
"""Get the nova-client object."""
# Initialize client object based on given version. Don't need endpoint.
nova_client = novaclient.Client(NOVA_CLI_VER, session=self.session)
return nova_client
def get_cinder_client(self, interface='public'):
"""Get the cinder-client object."""
cinder_endpoint = self.session.get_endpoint(service_type='volume',
interface=interface)
cinder_client = cinderclient.Client(CINDER_CLI_VER, session=self.session)
return cinder_client
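# --- Illustrative usage sketch (added for clarity; all values are placeholders) ---
# clients = OpenstackClients(auth_url='http://controller:5000/v2.0',
#                            username='admin', password='secret',
#                            tenant_name='demo')
# nova = clients.get_nova_client()
# print(nova.servers.list())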
``` |
{
"source": "jmilkiewicz/iot-starterkit",
"score": 3
} |
#### File: ws-interaction/ws-client-including-keepalive/ws-client-including-keepalive.py
```python
import asyncio
from contextlib import suppress
import aiohttp
from time import gmtime, strftime
import sys
import requests
config_instance='<example IoTS CF instance>'
config_alternate_id_4_device="<example_alternate_id>"
def do_upstream_request(payload):
    global config_instance
    global config_alternate_id_4_device
print("Doing upstream request with payload: " + payload)
alternate_id_4_capability=config_alternate_id_4_device
alternate_id_4_sensor=config_alternate_id_4_device
request_url='https://' + config_instance + '/iot/gateway/rest/measures/' + config_alternate_id_4_device
headers={'Content-Type' : 'application/json'}
# print(request_url)
# print(payload)
try:
response=requests.post(request_url, data=payload, headers=headers, cert=('./credentials.crt', './credentials.key'))
print(response.status_code)
print(response.text)
except:
print("an exception occured")
ping_interval=10
_ws=''
async def do_forever():
while True:
# print("forever")
time_string=strftime("%Y-%m-%d %H:%M:%S", gmtime())
await asyncio.sleep(ping_interval)
if (_ws != ''):
print("ping at " + time_string)
_ws.ping()
sys.stdout.flush()
async def main():
global _ws
asyncio.ensure_future(do_forever())
session = aiohttp.ClientSession()
async with session.ws_connect('ws://localhost:8765/') as ws:
_ws=ws
async for msg in ws:
print("received: " + str(msg.data))
# EXTENSION POINT
# process the received data and build a payload for upstream interaction with the IoT Service
# for simplicity we choose the same alternateId for all its types when creating the device
payload='{ "capabilityAlternateId" : "' + config_alternate_id_4_device + '" , "measures" : [[ "' + str(msg.data) + '" ]], "sensorAlternateId":"' + config_alternate_id_4_device + '" }'
do_upstream_request(payload)
sys.stdout.flush()
if msg.type == aiohttp.WSMsgType.CLOSED:
await ws.close()
break
elif msg.type == aiohttp.WSMsgType.ERROR:
await ws.close()
break
print("broken out")
sys.stdout.flush()
print("websocket client keeping the connection alive each " + str(ping_interval) + " seconds with a ping")
sys.stdout.flush()
loop=asyncio.get_event_loop()
loop.run_until_complete(main())
print("canceling pending tasks")
sys.stdout.flush()
pending=asyncio.Task.all_tasks()
for task in pending:
task.cancel()
with suppress(asyncio.CancelledError):
loop.run_until_complete(task)
sys.stdout.flush()
```
#### File: python/iot-starterkit-for-desktop/iot_starterkit_desktop.py
```python
try:
import config
except ImportError:
print("Please copy template-config.py to config.py and configure appropriately !"); exit();
# this can be used to activate debugging
# debug_communication=1
debug_communication=0
try:
# for Python2
from Tkinter import *
except ImportError:
# for Python3
from tkinter import *
import json
import time
import urllib3
def send_to_hcp():
global debug_communication
global http
global url
global headers
global s1
timestamp=int(time.time())
# print(timestamp)
body='{"mode":"async", "messageType":"' + str(config.message_type_id_From_device) + '", "messages":[{"sensor":"slider_desktop", "value":"' + str(s1.get()) + '", "timestamp":' + str(timestamp) + '}]}'
# print(body)
r = http.urlopen('POST', url, body=body, headers=headers)
if (debug_communication == 1):
print("send_to_hcp():" + str(r.status))
print(r.data)
def poll_from_hcp():
global debug_communication
global http
global url
global headers
global t1
global f4_cb1
r = http.urlopen('GET', url, headers=headers)
if (debug_communication == 1):
print("poll_from_hcp():" + str(r.status))
print(r.data)
json_string='{"all_messages":'+(r.data).decode("utf-8")+'}'
# print(json_string)
try:
json_string_parsed=json.loads(json_string)
# print(json_string_parsed)
        # take care: if multiple messages arrive in 1 payload - their order is last in / first out - so we need to traverse in reverse order
try:
messages_reversed=reversed(json_string_parsed["all_messages"])
for single_message in messages_reversed:
# print(single_message)
payload=single_message["messages"][0]
opcode=payload["opcode"]
operand=payload["operand"]
# print(opcode)
# print(operand)
# now do things depending on the opcode
if (opcode == "display"):
t1.config(state=NORMAL)
t1.delete(1.0, END)
t1.insert(END, operand)
t1.config(state=DISABLED)
if (opcode == "led"):
f4_cb1.config(state=NORMAL)
if (operand == "0"):
f4_cb1.deselect()
if (operand == "1"):
f4_cb1.select()
f4_cb1.config(state=DISABLED)
except (TypeError, ValueError):
print("Problem decoding the message " + (r.data).decode("utf-8") + " retrieved with poll_from_hcp()! Can and will continue though.")
def handle_slider(event):
global do_send
global s1
global cb1
value=s1.get()
if (value == 100):
do_send=1
cb1.config(state=NORMAL)
cb1.select()
cb1.config(state=DISABLED)
# print("Start sending now !")
if (value == 0):
do_send=0
# print("Stop sending now !")
cb1.config(state=NORMAL)
cb1.deselect()
cb1.config(state=DISABLED)
# print("slider value: ", value)
def handle_exit_button():
exit()
def my_send_timer():
global root
global do_send
# print("my_send_timer")
if (do_send == 1):
send_to_hcp()
root.after(1000, my_send_timer)
def my_poll_timer():
global root
# print("my_poll_timer")
poll_from_hcp()
root.after(1000, my_poll_timer)
def build_and_start_ui_with_timers():
global root
global s1
global cb1
global f4_cb1
global t1
root=Tk()
root.resizable(FALSE,FALSE)
root.title("IoT Starterkit - Device Simulator")
root.geometry('+50+50')
l1=Label(root, text="Data that the device sends", font = "TkDefaultFont 14 bold")
l1.pack()
l2=Label(root, text="Slide to 100 to start sending values once per second, slide to 0 to stop sending")
l2.pack()
s1=Scale(root, from_=0, to=100, orient=HORIZONTAL, command = handle_slider)
s1.configure(length=500)
s1.pack()
cb1=Checkbutton(root, text="Sending now", state=DISABLED)
cb1.pack()
f1=Frame(root, height=3, width=500)
f1.pack()
f2=Frame(root, height=1, width=500, bg="black")
f2.pack()
f3=Frame(root, height=3, width=500)
f3.pack()
l3=Label(root, text="Data that the device receives", font = "TkDefaultFont 14 bold")
l3.pack()
f4=Frame(root, width=500)
f4.pack()
f4.l1=Label(f4, text="Remote controlled LED (on/off)")
f4.l1.pack(side=LEFT)
f4_cb1=Checkbutton(f4, state=DISABLED)
f4_cb1.pack(side=LEFT)
l4=Label(root, text="Messages sent to the device")
l4.pack()
t1=Text(root, height=10, width=70, borderwidth=2, relief=SUNKEN, state=DISABLED)
# t1=Text(root, height=10, width=50, borderwidth=2)
t1.pack()
t1.config(state=NORMAL)
t1.insert(END, "Nothing received yet")
t1.config(state=DISABLED)
b1=Button(root, text="Exit", command=handle_exit_button)
b1.pack()
my_send_timer()
my_poll_timer()
root.mainloop()
# === main starts here ===============================================
# disable InsecureRequestWarning if you are working without certificate verification
# see https://urllib3.readthedocs.org/en/latest/security.html
# be sure to use a recent enough urllib3 version if this fails
try:
urllib3.disable_warnings()
except Exception:
print("urllib3.disable_warnings() failed - get a recent enough urllib3 version to avoid potential InsecureRequestWarning warnings! Can and will continue though.")
# use with or without proxy
if (config.proxy_url == ''):
http = urllib3.PoolManager()
else:
http = urllib3.proxy_from_url(config.proxy_url)
url='https://iotmms' + config.hcp_account_id + config.hcp_landscape_host + '/com.sap.iotservices.mms/v1/api/http/data/' + str(config.device_id)
headers = urllib3.util.make_headers(user_agent=None)
# use with authentication
headers['Authorization'] = 'Bearer ' + config.oauth_credentials_for_device
headers['Content-Type'] = 'application/json;charset=utf-8'
do_send=0
build_and_start_ui_with_timers()
```
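`send_to_hcp()` above builds its request body the same way, by string concatenation. A sketch of an equivalent construction with `json.dumps` (`build_hcp_body` is a hypothetical helper; the field names are taken from the body string in `send_to_hcp()`):
```python
import json
import time

def build_hcp_body(message_type_id, sensor, value):
    # mirrors the body assembled by hand in send_to_hcp()
    return json.dumps({
        "mode": "async",
        "messageType": str(message_type_id),
        "messages": [{"sensor": sensor,
                      "value": str(value),
                      "timestamp": int(time.time())}],
    })

# body = build_hcp_body(config.message_type_id_From_device, "slider_desktop", s1.get())
```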
#### File: python/mqtt-over-wss/mqtt-wss-sample.py
```python
try:
import config
except ImportError:
print("Please copy template-config.py to config.py and configure appropriately !"); exit();
import sys
import paho.mqtt.client as mqtt
import time
import random
# === START === values set from the config.py file ===
my_endpoint = "iotmms" + config.hcp_account_id + config.hcp_landscape_host
my_endpoint_certificate = config.endpoint_certificate
# only needed when using Client Certificate Authentication; my_username and my_password can be skipped in this case
# my_client_certificate = config.client_certificate
# my_client_key = config.client_key
my_device_id = config.device_id
my_client_id = my_device_id
my_username = my_device_id
my_password = config.oauth_credentials_for_device
my_message_type_upstream = config.message_type_id_From_device
# === END ===== values set from the config.py file ===
my_endpoint_url_path = "/com.sap.iotservices.mms/v1/api/ws/mqtt"
# for upstream communication
my_publish_topic = "iot/data/" + my_device_id
# for downstream communication
my_subscription_topic = "iot/push/" + my_device_id
is_connected = False
def on_connect(mqttc, obj, flags, rc):
print("on_connect - rc: " + str(rc))
global is_connected
is_connected = True
# you can use the push API (e.g. also from the built-in sample UI) to send to the device
def on_message(mqttc, obj, msg):
print("on_message - " + msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
def on_publish(mqttc, obj, message_id):
print("on_publish - message_id: " + str(message_id))
def on_subscribe(mqttc, obj, message_id, granted_qos):
print("on_subscribe - message_id: " + str(message_id) + " / qos: " + str(granted_qos))
def on_log(mqttc, obj, level, string):
print(string)
mqttc = mqtt.Client(client_id=my_client_id, transport='websockets')
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.tls_set(my_endpoint_certificate)
# to use Client Certificate Authentication, also specify the client certificate and key; setting the username and password can be skipped in this case
# mqttc.tls_set(my_endpoint_certificate, my_client_certificate, my_client_key)
mqttc.username_pw_set(my_username, my_password)
# setting an individual path for the WebSocket endpoint needs the development version of the Paho MQTT python lib and is possible since May 2017
# please adapt accordingly if you used the individually patched library version with mqttc.endpoint_url_path_set(my_endpoint_url_path) before
mqttc.ws_set_options(my_endpoint_url_path)
mqttc.connect(my_endpoint, 443, 60)
# you can use the push API (e.g. also from the built-in sample UI) to send to the device
mqttc.subscribe(my_subscription_topic, 0)
mqttc.loop_start()
publish_interval=5
value=0
while True:
if is_connected:
timestamp = int(time.time())
# == START ==== fill the payload now - in this example we use the typical IoT Starterkit payload ======
my_mqtt_payload='{"messageType":"' + my_message_type_upstream + '","messages":[{'
my_mqtt_payload=my_mqtt_payload + '"sensor":"mqtt-example", '
my_mqtt_payload=my_mqtt_payload + '"value":"' + str(value) + '", '
my_mqtt_payload=my_mqtt_payload + '"timestamp":' + str(timestamp)
my_mqtt_payload=my_mqtt_payload + '}]}'
# == END ====== fill the payload now - in this example we use the typical IoT Starterkit payload ======
print(my_mqtt_payload)
mqttc.publish(my_publish_topic, my_mqtt_payload, qos=0)
value=value+10
if (value > 100):
value=0
else:
print("still waiting for connection")
time.sleep(publish_interval)
``` |
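`is_connected` is set once in `on_connect()` and never cleared, so the publish loop keeps sending after a dropped connection until paho reconnects. A sketch of a disconnect callback that resets the flag (this is not in the original sample; it assumes the paho-mqtt 1.x callback signature):
```python
def on_disconnect(mqttc, obj, rc):
    # mark the client as offline so the publish loop pauses until reconnect
    global is_connected
    is_connected = False
    print("on_disconnect - rc: " + str(rc))

# registered next to the other callbacks:
# mqttc.on_disconnect = on_disconnect
```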
{
"source": "jmillanacosta/scrape-Malaysian-FoodTables",
"score": 3
} |
#### File: jmillanacosta/scrape-Malaysian-FoodTables/scrapeMyFCD.py
```python
import textwrap
import json
import requests
import re
import pandas as pd
import time
# Define variables for each of the modules A, B, C.
### Shared properties ###
my_headers = {'User-Agent': 'Mozilla/5.0'}
title_tags = "<h3>"
begin_JSON_tag, end_JSON_tag = "var product_nutrients = ", ";\\n"
### A) Current module ###
# Set up request parameters
identifier_site_A = "https://myfcd.moh.gov.my/myfcdcurrent/index.php/ajax/datatable_data"
pattern_A = "R\d+\d+\d+\d+\d+\d+" # Identifier for each food item
url1_A, url2_A = "https://myfcd.moh.gov.my/myfcdcurrent/index.php/site/detail_product/", "/0/168/-1/0/0" # Each pattern goes in between these
### B) Industry module ###
identifier_site_B = "https://myfcd.moh.gov.my/myfcdindustri//static/DataTables-1.10.12/examples/server_side/scripts/server_processing.php"
pattern_B = r"\d{7,}" # Identifier for each food item: at least seven digits
url1_B, url2_B = "https://myfcd.moh.gov.my/myfcdindustri/index.php/site/detail_product/", "/0/10/-1/0/0/" # Each pattern goes in between these
### C) 1997 module ###
identifier_site_C = "https://myfcd.moh.gov.my/myfcd97/index.php/ajax/datatable_data"
pattern_C = r"\d{6,}" # Identifier for each food item: at least six digits
url1_C, url2_C = "https://myfcd.moh.gov.my/myfcd97/index.php/site/detail_product/", "/0/10/-1/0/0/" # Each pattern goes in between these
# Functions
## Function that returns all food item sites (urls). Identifier site is where the JS table data is stored.
def requestFoodItems(headers, identifier, pattern, url1, url2):
r = requests.get(identifier, headers=headers)
parsed = r.text
matches = re.findall(pattern, parsed)
urls = []
# Assemble each identifier's URL
progress = 0
for match in matches:
progress += 1
url = "".join(("".join((url1, match)), url2))
urls.append(url)
print("Gathering all food item URLS... {}/{}".format(progress, len(matches)))
return urls
## A function that creates the nutrition dictionary that will gather and store the data scraped from the website
def make_nutrition_tables(urls, headers, make_dummy_dict, fix_nutrient_name = False):
nutrition = dict()
# Analyze each URL (each food)
progress_urls = 0
for url in urls:
time.sleep(2)
progress_urls += 1
print("Requesting url #{}/{}".format(progress_urls, len(urls)))
# Request web
my = requests.get(url, headers=headers)
# Parse html to string
parsed = str(my.content)
# Food name is between headers <h3>
nameIndex = ((len("<h3>") + parsed.find("<h3>")), (parsed.find("</h3")))
name = parsed[nameIndex[0]:nameIndex[1]]
## Exclude the code at the end of the name
indexCode = name.find("<")
name = name[0:indexCode]
# Retrieve the JSON containing nutrition values
beginJSON = parsed.find("var product_nutrients = ")
beginJSON = beginJSON + len("var product_nutrients = ")
endJSON = parsed.find(";\\n", beginJSON)
nutriJSON = parsed[beginJSON:endJSON]
nutriJSON = json.loads(nutriJSON)
print("Creating food item dictionary...")
if make_dummy_dict: # In Modules B) and C) they did not add key names. To work as a dictionary, I will add dummy key names
i = 0
nutriJSONdict = dict()
for item in nutriJSON:
dummy = i
i = i+1
nutriJSONdict[dummy] = item
nutriJSON = nutriJSONdict
print("Added item #{} to the dictionary...".format(i))
# Create list to store nutritional values for this item
nutrients = list()
# Retrieve the subdictionary entry containing the value of each nutrient
for nutrient in nutriJSON.keys():
value = nutriJSON[nutrient]["value"]
if fix_nutrient_name: # The JSON from C) Module 1997 doesn't label each nutrient entry with its name; the name is stored under the "name" key instead
nutrient = nutriJSON[nutrient]["name"]
nutrientValue = (nutrient, value)
nutrients.append(nutrientValue)
print("Food item: {}, nutrient: {} successfully added to dictionary".format(name, nutrient))
# Append each entry to the nutrition dictionary
nutrition[name] = dict(nutrients)
print("Finished scraping this module.")
return nutrition
# Main
def main():
"""
Scrape the Malaysian Food Composition Database
"""
delim = '-' * 79
print(textwrap.dedent(
"""
Title:\t\tScrape the Malaysian Food Composition Database
Author:\t\t<NAME>
Date:\t\tSeptember 2021
Important:\tRequests are slowed down to play nice with the website. The run will take a few minutes.
{}""".format(delim)
))
# Request all urls
time.sleep(5) # Give some time to read my name
print("Requesting urls.")
print("Requesting urls for module A)")
urls_A = requestFoodItems(my_headers, identifier_site_A, pattern_A, url1_A, url2_A)
print("Requesting urls for module B)")
urls_B = requestFoodItems(my_headers, identifier_site_B, pattern_B, url1_B, url2_B)
print("Requesting urls for module C)")
urls_C = requestFoodItems(my_headers, identifier_site_C, pattern_C, url1_C, url2_C)
print(delim)
# Fill up nutrition dictionary
print("Creating nutrition dictionary for all modules")
print("MODULE A) CURRENT MALAYSIAN FCD")
nutrition = make_nutrition_tables(urls_A, my_headers, make_dummy_dict=False)
print("MODULE B) INDUSTRY MODULE")
nutrition_B = make_nutrition_tables(urls_B, my_headers, make_dummy_dict=True)
print("MODULE C) 1997 MODULE")
nutrition_C = make_nutrition_tables(urls_C, my_headers, make_dummy_dict=True, fix_nutrient_name=True)
nutrition.update(nutrition_B)
nutrition.update(nutrition_C)
print("Dictionary complete")
print(delim)
# Create a data frame and export it in csv
nutritionDf = pd.DataFrame(nutrition)
nutritionDf.to_csv("fctMalaysia.csv")
# Create a JSON and export it too
with open("fctJSON", "w") as json_file:
json.dump(nutrition, json_file)
print("Successfully exported fctJSON.JSON and fctMalaysia.csv.")
if __name__ == "__main__":
main()
``` |
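The scraper issues hundreds of sequential `requests.get` calls, so a single transient failure aborts the whole run. A sketch of a `requests.Session` with automatic retries and exponential backoff, built from standard `requests`/`urllib3` machinery (`make_session` is a hypothetical helper that could replace the bare `requests.get` calls above):
```python
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session(total_retries=5, backoff=1.0):
    # retry GETs on connection errors and on common 5xx/429 responses
    retry = Retry(total=total_retries, backoff_factor=backoff,
                  status_forcelist=[429, 500, 502, 503, 504])
    session = requests.Session()
    session.mount("https://", HTTPAdapter(max_retries=retry))
    session.mount("http://", HTTPAdapter(max_retries=retry))
    return session

# session = make_session()
# r = session.get(identifier_site_A, headers=my_headers, timeout=30)
```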
{
"source": "Jmillan-Dev/drf-query-filter",
"score": 2
} |
#### File: drf-query-filter/drf_query_filter/mixins.py
```python
from django.db.models import Q
from rest_framework.exceptions import ValidationError
from rest_framework.compat import coreschema
from drf_query_filter.utils import ConnectorType
class Range:
default_list_separator = ','
def __init__(self, *args,
list_separator: str = None,
equal: bool = False,
allow_empty: bool = True,
**kwargs):
self.list_separator = list_separator or self.default_list_separator
self.equal = equal
self.allow_empty = allow_empty
self.target_fields = None
super().__init__(*args, **kwargs)
def get_target_fields(self):
if self.equal:
return [('%s__gte' % target_field, '%s__lte' % target_field) for target_field
in self.target_fields]
else:
return [('%s__gt' % target_field, '%s__lt' % target_field) for target_field
in self.target_fields]
def validate(self, value):
""" we need to divide the value into two values """
value = value.split(self.list_separator)
# check length
if len(value) < 2:
raise ValidationError(
'Not enough values: got %s, need at least 2' % len(value),
code='not_enough_values')
new_values = []
for v in value:
if self.allow_empty and len(v) == 0:
new_values.append(None) # ignore the value but push a null value
else:
new_values.append(super().validate(v))
return new_values
def get_query(self):
query = Q(_connector=self.connector)
for target_field_gt, target_field_lt in self.get_target_fields():
query_dict = {}
if self.value[0]:
query_dict[target_field_gt] = self.value[0]
if self.value[1]:
query_dict[target_field_lt] = self.value[1]
if self.connector == ConnectorType.AND:
query &= Q(**query_dict)
elif self.connector == ConnectorType.OR:
query |= Q(**query_dict)
return query
def get_description(self):
""" We update the original description by adding a format into it... since the
swagger specification does not support our interesting and complicated way to
pass a range values"""
original_type = super().get_schema()
if 'format' in original_type:
schema_type = '%s:%s' % (
original_type.get('type', ''),
original_type['format']
)
else:
schema_type = original_type.get('type', '')
return '%s\n Format: %s' % (
super().get_description(),
schema_type,
)
def get_coreschema_field(self):
return coreschema.String(
format=r'(\w*),(\w*)'
)
def get_schema(self):
return {
'type': 'string',
'format': r'(\w*),(\w*)',
}
```
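`Range` is written as a mixin: it delegates per-endpoint validation to `super().validate()` and expects `connector`, `value`, and the schema helpers from a concrete field class. A sketch of the intended composition, mirroring the `RangeIntegerField` that the tests below import from `drf_query_filter.fields` (`MyRangeIntegerField` is a hypothetical name; the `Field` call/validate API is assumed from those tests):
```python
from drf_query_filter.fields import IntegerField
from drf_query_filter.mixins import Range

class MyRangeIntegerField(Range, IntegerField):
    """Accepts 'lo,hi' (either side may be empty) and emits gt/lt lookups."""

field = MyRangeIntegerField('price', equal=True)
field({'price': '10,99'})          # parse the query parameter
if field.is_valid():
    query = field.get_query()      # Q(price__gte=10) & Q(price__lte=99)
```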
#### File: drf-query-filter/tests/test_fields.py
```python
from datetime import datetime
from decimal import Decimal
from django.core.validators import (
EmailValidator,
URLValidator,
MaxValueValidator,
MinValueValidator,
)
from django.db.models import Q
from django.test import TestCase, override_settings
from django.utils import timezone
from drf_query_filter.fields import (
Empty,
Field,
ExistsField,
FloatField,
BooleanField,
ChoicesField,
DecimalField,
IntegerField,
ConcatField,
DateTimeField,
DateField,
RangeFloatField,
RangeIntegerField,
RangeDecimalField,
RangeDateField,
default_timezone,
)
class FieldTests(TestCase):
def test_target_field(self):
"""
Testing the creation of the target fields
"""
self.assertEqual(Field('field', 'target_field').target_fields, ['target_field'])
self.assertEqual(Field('field').target_fields, ['field'])
self.assertEqual(Field('field', ['one', 'two']).target_fields, ['one', 'two'])
self.assertEqual(Field('field', ('target_field',)).target_fields,
('target_field',))
def test_call(self):
"""
Testing the __call__ method
"""
field = Field('field')
self.assertTrue(field({'field': 'value'}))
self.assertEqual(field._raw_value, 'value')
self.assertFalse(field({}))
self.assertTrue(isinstance(field._raw_value, Empty))
self.assertFalse(field({'other_field': 'value'}))
self.assertTrue(isinstance(field._raw_value, Empty))
def test_validators(self):
""" Testing to see if Django validators are working with the fields """
field = Field('field', validators=[EmailValidator()])
field({'field': 'user@example.com'})  # any syntactically valid address passes EmailValidator
self.assertTrue(field.is_valid())
field({'field': 'not_an_email'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'invalid', field._errors)
field = Field('field', validators=[URLValidator(code='awful')])
field({'field': 'https://127.0.0.1:8000/'})
self.assertTrue(field.is_valid())
field({'field': 'not_an_url'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'awful', field._errors)
def test_get_query(self):
""" Test the generation of queries """
field = Field('field', 'target_field')
field({'field': 'value'})
field.is_valid()
self.assertEqual(str(field.get_query()), str(Q(target_field='value')))
field = Field('field', ['target_one', 'target_two'])
field({'field': 'value'})
field.is_valid()
self.assertEqual(str(field.get_query()),
str(Q(target_one='value') & Q(target_two='value')))
field = Field('field')
field({'field': 'value'})
field.is_valid()
self.assertEqual(str(field.get_query()), str(Q(field='value')))
class ExistsFieldTests(TestCase):
def test_get_value_query(self):
field = ExistsField('field', return_value='My_custom_value')
self.assertTrue(field({'field': None}))
self.assertTrue(field.is_valid())
self.assertEqual(field.value, None)
self.assertEqual(field.get_value_query(), 'My_custom_value')
self.assertEqual(str(field.get_query()), str(Q(field='My_custom_value')))
class NumericFieldsTests(TestCase):
def test_integer_validate(self):
field = IntegerField('field')
field({'field': '0123'})
self.assertTrue(field.is_valid(), field._errors)
self.assertEqual(field.value, 123)
field({'field': '10.69'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'invalid')
field({'field': 'not_a_number'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'invalid')
def test_float_validate(self):
field = FloatField('field')
field({'field': '0123'})
self.assertTrue(field.is_valid(), field._errors)
self.assertEqual(field.value, 123)
field({'field': '10.69'})
self.assertTrue(field.is_valid(), field._errors)
self.assertEqual(field.value, float('10.69'))
field({'field': 'not_a_number'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'invalid')
def test_decimal_validate(self):
field = DecimalField('field')
field({'field': '0123'})
self.assertTrue(field.is_valid())
self.assertEqual(field.value, Decimal('0123'))
field({'field': '10.69'})
self.assertTrue(field.is_valid(), field._errors)
self.assertEqual(field.value, Decimal('10.69'))
field({'field': 'not_a_number'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'invalid')
def test_validators(self):
""" Using numeric validators of Django """
for field_class in [IntegerField, FloatField, DecimalField]:
field = field_class('field', validators=[MinValueValidator(3),
MaxValueValidator(10)])
field({'field': '10'})
self.assertTrue(field.is_valid(), field._errors)
field({'field': '0'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'min_value')
field({'field': '100'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'max_value')
class ChoicesFieldTests(TestCase):
def test_validate(self):
field = ChoicesField('field', choices=['green', 'red', 'yellow'])
for value in ['green', 'red', 'yellow']:
field({'field': value})
self.assertTrue(field.is_valid(), field._errors)
for value in ['greenly', '']:
field({'field': value})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'not_in_choices')
field = ChoicesField('field', choices=['car', 'Plane', 'BOAT'])
for value in ['Car', 'plane', 'bOAT']:
field({'field': value})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'not_in_choices')
class BooleanFieldTests(TestCase):
def test_validate(self):
field = BooleanField('field')
for value in ['true', 'false', '0', '1']:
field({'field': value})
self.assertTrue(field.is_valid(), value)
for value in ['verdadero', 'falso', '____', '']:
field({'field': value})
self.assertFalse(field.is_valid(), value)
def test_value(self):
field = BooleanField('field')
field({'field': 'true'})
field.is_valid()
self.assertTrue(field.value)
field({'field': 'false'})
field.is_valid()
self.assertFalse(field.value)
def test_invert(self):
field = BooleanField('field', invert=True)
field({'field': 'true'})
field.is_valid()
self.assertFalse(field.value)
field({'field': 'false'})
field.is_valid()
self.assertTrue(field.value)
class ConcatFieldTests(TestCase):
def test_annotate(self):
# we cannot really compare the Concat values, so we just check the
# generated result field name
field = ConcatField('field', ['field_one', 'field_two'])
self.assertIn('field_one_field_two', field.get_annotate())
field = ConcatField('field', ['field_one__element', 'field__other'])
self.assertIn('field_one_element_field_other', field.get_annotate())
field = ConcatField('field', ['field_one', 'field_two'],
target_field_name='field_annotate')
self.assertIn('field_annotate', field.get_annotate())
def test_get_query(self):
field = ConcatField('field', ['field_one', 'field_two'])
field({'field': 'value'})
field.is_valid()
self.assertEqual(str(field.get_query()), str(Q(field_one_field_two='value')))
field = ConcatField('field', ['field_one', 'field_two'], lookup='icontains')
field({'field': 'value'})
field.is_valid()
self.assertEqual(str(field.get_query()),
str(Q(field_one_field_two__icontains='value')))
class DateFieldTests(TestCase):
def test_validate(self):
field = DateField('field')
field({'field': '2020-12-31'})
self.assertTrue(field.is_valid(), field._errors)
self.assertEqual(field.value, datetime(year=2020, month=12, day=31).date())
field({'field': '2020-12-12T10:25:30Z'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'wrong_format', field._errors)
field({'field': '31-12-2020'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'wrong_format', field._errors)
class DateTimeFieldTests(TestCase):
def test_validate(self):
field = DateTimeField('field')
field({'field': '2020-1-1T10:25:30Z'})
self.assertTrue(field.is_valid(), field._errors)
self.assertEqual(
field.value,
datetime(year=2020, month=1, day=1, hour=10, minute=25, second=30)
)
field({'field': '2021-30-12'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'wrong_format', field._errors)
field({'field': '31-12-2020T10:10:10'})
self.assertFalse(field.is_valid())
self.assertEqual(field._errors[0].code, 'wrong_format', field._errors)
@override_settings(USE_TZ=True)
def test_validate_forcing_timezone(self):
field = DateTimeField('field')
field({'field': '2020-1-1T10:25:30Z'})
_datetime = datetime(year=2020, month=1, day=1, hour=10, minute=25, second=30)
self.assertTrue(field.is_valid(), field._errors)
self.assertNotEqual(field.value, _datetime)
self.assertEqual(field.value, timezone.make_aware(_datetime, default_timezone()))
class TestingRangeMixin(TestCase):
def validate(self, field_class, values, is_true=True):
field = field_class('field')
field({'field': values})
if is_true:
self.assertTrue(field.is_valid(), field._errors)
def test_validate(self):
field_classes = [RangeIntegerField, RangeFloatField, RangeDecimalField]
for field_class in field_classes:
field = field_class('field')
field({'field': '1,10'})
self.assertTrue(field.is_valid(), field._errors)
field({'field': ',10'})
self.assertTrue(field.is_valid(), field._errors)
field({'field': '1,'})
self.assertTrue(field.is_valid(), field._errors)
field = RangeDateField('field')
field({'field': '2020-1-1,2020-12-31'})
self.assertTrue(field.is_valid(), field._errors)
field({'field': ',2020-12-31'})
self.assertTrue(field.is_valid(), field._errors)
field({'field': '2020-1-1,'})
self.assertTrue(field.is_valid(), field._errors)
def get_validate_query(self, field_class, value_a, value_b):
field = field_class('field', equal=False)
field({'field': '%s,%s' % (value_a, value_b)})
self.assertTrue(field.is_valid(), field._errors)
self.assertEqual(str(field.get_query()),
str(Q(**{'field__gt': value_a, 'field__lt': value_b})))
field = field_class('field', equal=True)
field({'field': '%s,%s' % (value_a, value_b)})
self.assertTrue(field.is_valid(), field._errors)
self.assertEqual(str(field.get_query()),
str(Q(**{'field__gte': value_a, 'field__lte': value_b})))
def test_get_query(self):
self.get_validate_query(RangeIntegerField, 1, 10)
self.get_validate_query(RangeFloatField, 1.0, 10.0)
self.get_validate_query(RangeDecimalField, Decimal(1), Decimal(10))
``` |
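These tests lean on Django validators and `override_settings`, so they need configured settings before import. A minimal sketch of a standalone runner, assuming no project settings module exists (the in-memory sqlite database only satisfies `django.test.TestCase` plumbing):
```python
import django
from django.conf import settings
from django.test.utils import get_runner

if not settings.configured:
    settings.configure(
        DEBUG=True,
        USE_TZ=False,
        DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3",
                               "NAME": ":memory:"}},
        INSTALLED_APPS=[],
    )
django.setup()

TestRunner = get_runner(settings)
failures = TestRunner().run_tests(["tests.test_fields"])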
{
"source": "jmillegard/pymat",
"score": 4
} |
#### File: jmillegard/pymat/mode.py
```python
from collections import Counter
def calculate_mode(numbers):
c = Counter(numbers)
numbers_freq = c.most_common()
max_count = numbers_freq[0][1]
modes = []
for num in numbers_freq:
if num[1] == max_count:
modes.append(num[0])
return modes
if __name__ == '__main__':
try:
inp = input('Enter some numbers separated by whitespace: ').split()
numbers = [int(num) for num in inp]
modes = calculate_mode(numbers)
print('Mode(s) of the numbers: {0}'.format(modes))
except ValueError:
print('Invalid input')
``` |
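Since Python 3.8 the standard library computes the same thing directly: `statistics.multimode` returns every value tied for the highest count, matching `calculate_mode` above.
```python
from statistics import multimode

# same semantics as calculate_mode: all values tied for the highest count
assert multimode([1, 1, 2, 3, 3]) == [1, 3]
assert multimode([4, 5, 6]) == [4, 5, 6]  # every value occurs once, so all are modes
```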
{
"source": "jmilleralpine/hadrian",
"score": 2
} |
#### File: lib/model/testNaive.py
```python
import unittest
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1ModelNaive(unittest.TestCase):
#GAUSSIAN###############################################################
def testNaiveGaussian(self):
# test array signature, then map signature
# from sklearn.naive_bayes import GaussianNB
# clf = GaussianNB()
# X = np.array([[-1, 0, 1],
# [-1, 0, 1],
# [-3, 2, 3],
# [-3, 2, 3],
# [ 0, -1, -1],
# [-2, 3, 3]])
# Y = np.array([1, 1, 1,1, 2, 2])
# clf.fit(X,Y)
engine, = PFAEngine.fromYaml("""
input: {type: array, items: double}
output: {type: array, items: double}
cells:
twoClass:
type:
type: map
values:
type: array
items:
type: record
name: parameters
fields:
- {name: mean, type: double}
- {name: variance, type: double}
init:
{class1: [{mean: -2.0, variance: 1.0},
{mean: 1.0, variance: 1.0},
{mean: 2.0, variance: 1.0}],
class2: [{mean: -1.0, variance: 1.0},
{mean: 1.0, variance: 4.0},
{mean: 1.0, variance: 4.0}]}
action:
- let:
class1params: {cell: twoClass, path: [{string: class1}]}
class2params: {cell: twoClass, path: [{string: class2}]}
- let:
class1LL: {model.naive.gaussian: [input, class1params]}
class2LL: {model.naive.gaussian: [input, class2params]}
- let:
classLL:
type: {type: array, items: double}
new:
- class1LL
- class2LL
- let:
class1Lprior: -0.40546511
class2Lprior: -1.09861229
- let:
classLPost:
type: {type: array, items: double}
new:
- "+": [class1LL, class1Lprior]
- "+": [class2LL, class2Lprior]
- let:
C: {a.logsumexp: [classLPost]}
- a.map:
- classLPost
- params: [{x: double}]
ret: double
do: {"-": [x, C]}
""")
pfa_output = engine.action([-1.0, 0.0, 1.0])
true_value = [-0.4017144770379799, -1.1061560181578711]
for p,t in zip(pfa_output, true_value):
self.assertAlmostEqual(p, t, places=3)
engine, = PFAEngine.fromYaml("""
input: {type: map, values: double}
output: {type: array, items: double}
cells:
twoClass:
type:
type: map
values:
type: map
values:
type: record
name: parameters
fields:
- {name: mean, type: double}
- {name: variance, type: double}
init:
{class1: {f1: {mean: -2.0, variance: 1.0},
f2: {mean: 1.0, variance: 1.0},
f3: {mean: 2.0, variance: 1.0}},
class2: {f1: {mean: -1.0, variance: 1.0},
f2: {mean: 1.0, variance: 4.0},
f3: {mean: 1.0, variance: 4.0}}}
action:
- let:
class1params: {cell: twoClass, path: [{string: class1}]}
class2params: {cell: twoClass, path: [{string: class2}]}
- let:
class1LL: {model.naive.gaussian: [input, class1params]}
class2LL: {model.naive.gaussian: [input, class2params]}
- let:
classLL:
type: {type: array, items: double}
new:
- class1LL
- class2LL
- let:
class1Lprior: -0.40546511
class2Lprior: -1.09861229
- let:
classLPost:
type: {type: array, items: double}
new:
- "+": [class1LL, class1Lprior]
- "+": [class2LL, class2Lprior]
- let:
C: {a.logsumexp: [classLPost]}
- a.map:
- classLPost
- params: [{x: double}]
ret: double
do: {"-": [x, C]}
""")
pfa_output = engine.action({"f1": -1.0, "f2": 0.0, "f3": 1.0})
true_value = [-0.4017144770379799, -1.1061560181578711]
for p,t in zip(pfa_output, true_value):
self.assertAlmostEqual(p, t, places=3)
#MULTINOMIAL################################################################
def testNaiveMultinomial(self):
# First the map signature, then the array signature
# from sklearn.naive_bayes import MultinomialNB
# X = array([[0, 3, 0, 3],
# [0, 1, 2, 1],
# [4, 0, 2, 4],
# [0, 1, 4, 0],
# [4, 4, 4, 1],
# [2, 0, 0, 1],
# [2, 0, 4, 2]])
# Y = array([1, 1, 1, 2, 2, 3, 3])
# clf = MultinomialNB(alpha=.01)
# clf.fit(X,Y)
engine, = PFAEngine.fromYaml("""
input: {type: map, values: double}
output: {type: array, items: double}
cells:
threeClass:
type:
type: map
values:
type: map
values: double
init:
{class1: {f1: 0.2,
f2: 0.2,
f3: 0.2,
f4: 0.4},
class2: {f1: 0.22228381448432147,
f2: 0.27771618612438459,
f3: 0.44401330534751776,
f4: 0.055986696442301712},
class3: {f1: 0.36322463751061512,
f2: 0.0009057970985842759,
f3: 0.36322463751061512,
f4: 0.27264492810555946}}
action:
- let:
class1params: {cell: threeClass, path: [{string: class1}]}
class2params: {cell: threeClass, path: [{string: class2}]}
class3params: {cell: threeClass, path: [{string: class3}]}
- let:
class1LL: {model.naive.multinomial: [input, class1params]}
class2LL: {model.naive.multinomial: [input, class2params]}
class3LL: {model.naive.multinomial: [input, class3params]}
- let:
classLL:
type: {type: array, items: double}
new:
- class1LL
- class2LL
- class3LL
- let:
class1Lprior: -0.84729786
class2Lprior: -1.25276297
class3Lprior: -1.25276297
- let:
classLPost:
type: {type: array, items: double}
new:
- "+": [class1LL, class1Lprior]
- "+": [class2LL, class2Lprior]
- "+": [class3LL, class3Lprior]
- let:
C: {a.logsumexp: [classLPost]}
- a.map:
- classLPost
- params: [{x: double}]
ret: double
do: {"-": [x, C]}
""")
pfa_output = engine.action({"f1": 0, "f2": 1, "f3": 2, "f4": 1})
true_value = [-0.49768992, -0.9468967 , -5.4910462 ]
for p,t in zip(pfa_output, true_value):
self.assertAlmostEqual(p, t, places=3)
# test array signature
engine, = PFAEngine.fromYaml("""
input: {type: array, items: double}
output: {type: array, items: double}
cells:
threeClass:
type:
type: map
values:
type: array
items: double
init:
{class1: [0.2,
0.2,
0.2,
0.4],
class2: [0.22228381448432147,
0.27771618612438459,
0.44401330534751776,
0.055986696442301712],
class3: [0.36322463751061512,
0.0009057970985842759,
0.36322463751061512,
0.27264492810555946]}
action:
- let:
class1params: {cell: threeClass, path: [{string: class1}]}
class2params: {cell: threeClass, path: [{string: class2}]}
class3params: {cell: threeClass, path: [{string: class3}]}
- let:
class1LL: {model.naive.multinomial: [input, class1params]}
class2LL: {model.naive.multinomial: [input, class2params]}
class3LL: {model.naive.multinomial: [input, class3params]}
- let:
classLL:
type: {type: array, items: double}
new:
- class1LL
- class2LL
- class3LL
- let:
class1Lprior: -0.84729786
class2Lprior: -1.25276297
class3Lprior: -1.25276297
- let:
classLPost:
type: {type: array, items: double}
new:
- "+": [class1LL, class1Lprior]
- "+": [class2LL, class2Lprior]
- "+": [class3LL, class3Lprior]
- let:
C: {a.logsumexp: [classLPost]}
- a.map:
- classLPost
- params: [{x: double}]
ret: double
do: {"-": [x, C]}
""")
pfa_output = engine.action([0, 1, 2, 1])
true_value = [-0.49768992, -0.9468967 , -5.4910462 ]
for p,t in zip(pfa_output, true_value):
self.assertAlmostEqual(p, t, places=3)
#BERNOULLI################################################################
def testNaiveBernoulli(self):
# First the map signature, then the array signature
# from sklearn.naive_bayes import BernoulliNB
# X = array([[1, 1, 0],
# [1, 1, 1],
# [1, 0, 1],
# [1, 0, 0],
# [1, 0, 0],
# [1, 1, 1]])
# Y = array([ 1., 1., 2., 2., 3., 3.])
# clf = BernoulliNB()
# clf.fit(X,Y)
engine, = PFAEngine.fromYaml("""
input: {type: array, items: string}
output: {type: array, items: double}
cells:
threeClass:
type:
type: map
values:
type: map
values: double
init:
{class1: {f1: 0.75,
f2: 0.75,
f3: 0.5},
class2: {f1: 0.75,
f2: 0.25,
f3: 0.5},
class3: {f1: 0.75,
f2: 0.5,
f3: 0.5}}
action:
- let:
class1params: {cell: threeClass, path: [{string: class1}]}
class2params: {cell: threeClass, path: [{string: class2}]}
class3params: {cell: threeClass, path: [{string: class3}]}
- let:
class1LL: {model.naive.bernoulli: [input, class1params]}
class2LL: {model.naive.bernoulli: [input, class2params]}
class3LL: {model.naive.bernoulli: [input, class3params]}
- let:
classLL:
type: {type: array, items: double}
new:
- class1LL
- class2LL
- class3LL
- let:
C: {a.logsumexp: [classLL]}
- a.map:
- classLL
- params: [{x: double}]
ret: double
do: {"-": [x, C]}
""")
pfa_output = engine.action(["f1", "f2", "somethingelse"])
true_value = [-0.69314718, -1.79175947, -1.09861229]
for p,t in zip(pfa_output, true_value):
self.assertAlmostEqual(p, t, places=3)
```
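Every engine above follows the same recipe: per-class log-likelihood plus log-prior, normalized with log-sum-exp. A sketch of the first Gaussian test in plain numpy/scipy, with the class parameters and priors copied from that test:
```python
import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm

x = np.array([-1.0, 0.0, 1.0])
means      = {"class1": [-2.0, 1.0, 2.0], "class2": [-1.0, 1.0, 1.0]}
variances  = {"class1": [1.0, 1.0, 1.0],  "class2": [1.0, 4.0, 4.0]}
log_priors = {"class1": -0.40546511, "class2": -1.09861229}

log_post = np.array([norm.logpdf(x, means[c], np.sqrt(variances[c])).sum()
                     + log_priors[c] for c in ("class1", "class2")])
log_post -= logsumexp(log_post)   # normalize, as the a.logsumexp step does in PFA
# log_post ~ [-0.4017, -1.1062], the values asserted in testNaiveGaussian
```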
#### File: lib/model/testNeighbor.py
```python
import unittest
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1ModelNeighbor(unittest.TestCase):
def testFindKNearestNeighbors(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: double}
output: {type: array, items: {type: array, items: double}}
cells:
codebook:
type:
type: array
items:
type: array
items: double
init:
- [1, 1, 1, 1, 1]
- [2, 2, 2, 2, 2]
- [3, 3, 3, 3, 3]
- [4, 4, 4, 4, 4]
- [5, 5, 5, 5, 5]
action:
model.neighbor.nearestK:
- 2
- input
- cell: codebook
''')
self.assertEqual(set(map(tuple, engine.action([1.2, 1.2, 1.2, 1.2, 1.2]))), set([(1.0, 1.0, 1.0, 1.0, 1.0), (2.0, 2.0, 2.0, 2.0, 2.0)]))
self.assertEqual(set(map(tuple, engine.action([4.1, 4.1, 4.1, 4.1, 4.1]))), set([(4.0, 4.0, 4.0, 4.0, 4.0), (5.0, 5.0, 5.0, 5.0, 5.0)]))
def testFindAllNeighborsInABall(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: double}
output: {type: array, items: {type: array, items: double}}
cells:
codebook:
type:
type: array
items:
type: array
items: double
init:
- [1, 1, 1, 1, 1]
- [2, 2, 2, 2, 2]
- [3, 3, 3, 3, 3]
- [4, 4, 4, 4, 4]
- [5, 5, 5, 5, 5]
action:
model.neighbor.ballR:
- m.sqrt: 5
- input
- cell: codebook
''')
self.assertEqual(set(map(tuple, engine.action([1.2, 1.2, 1.2, 1.2, 1.2]))), set([(1.0, 1.0, 1.0, 1.0, 1.0), (2.0, 2.0, 2.0, 2.0, 2.0)]))
self.assertEqual(set(map(tuple, engine.action([4.1, 4.1, 4.1, 4.1, 4.1]))), set([(4.0, 4.0, 4.0, 4.0, 4.0), (5.0, 5.0, 5.0, 5.0, 5.0)]))
def testAverageOfSomePoints(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: double}
output: {type: array, items: double}
cells:
points:
type:
type: array
items:
type: array
items: double
init:
- [1, 1, 1, 1, 1]
- [2, 2, 2, 2, 2]
- [3, 3, 3, 3, 3]
- [4, 4, 4, 4, 4]
- [5, 5, 5, 5, 5]
action:
model.neighbor.mean:
- cell: points
''')
self.assertEqual(engine.action([1.2, 1.2, 1.2, 1.2, 1.2]), [3.0, 3.0, 3.0, 3.0, 3.0])
engine, = PFAEngine.fromYaml('''
input: {type: array, items: double}
output: {type: array, items: double}
cells:
points:
type:
type: array
items:
type: array
items: double
init:
- [1, 1, 1, 1, 1]
- [2, 2, 2, 2, 2]
- [3, 3, 3, 3, 3]
- [4, 4, 4, 4, 4]
- [5, 5, 5, 5, 5]
action:
model.neighbor.mean:
- cell: points
- params: [{point: {type: array, items: double}}]
ret: double
do: {m.exp: {u-: {metric.simpleEuclidean: [input, point]}}}
''')
for x in engine.action([1.2, 1.2, 1.2, 1.2, 1.2]):
self.assertAlmostEqual(x, 1.253377, places=3)
```
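A sketch of the `model.neighbor.nearestK` lookup in plain Python, using the codebook and query point from the first test (`math.dist`, Python 3.8+, is the simple Euclidean metric):
```python
import math

codebook = [[float(v)] * 5 for v in range(1, 6)]

def nearest_k(k, point, codebook):
    # sort the codebook by Euclidean distance to the query, keep the k closest
    return sorted(codebook, key=lambda c: math.dist(point, c))[:k]

assert nearest_k(2, [1.2] * 5, codebook) == [[1.0] * 5, [2.0] * 5]
```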
#### File: lib/model/testNeural.py
```python
import unittest
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1ModelNeural(unittest.TestCase):
def testRegNeural(self):
engine, = PFAEngine.fromYaml("""
input: {type: array, items: double}
output: {type: array, items: double}
cells:
model:
type:
type: array
items:
type: record
name: layer
fields:
- {name: weights, type: {type: array, items: {type: array, items: double}}}
- {name: bias, type: {type: array, items: double}}
init:
- {weights: [[ -6.0, -8.0],
[-25.0, -30.0]],
bias: [ 4.0, 50.0]}
- {weights: [[-12.0, 30.0]],
bias: [-25.0]}
action:
m.link.logit:
model.neural.simpleLayers:
- input
- cell: model
- params: [{x: double}]
ret: double
do: {m.link.logit: [x]}
""")
self.assertAlmostEqual(engine.action([0.0, 0.0])[0], 0.0, places=1)
self.assertAlmostEqual(engine.action([1.0, 0.0])[0], 1.0, places=1)
self.assertAlmostEqual(engine.action([0.0, 1.0])[0], 1.0, places=1)
self.assertAlmostEqual(engine.action([1.0, 1.0])[0], 0.0, places=1)
# def testRegNeural(self):
# engine, = PFAEngine.fromYaml("""
# input: {type: array, items: double}
# output: double
# cells:
# model:
# type:
# type: record
# name: layers
# fields:
# - {name: weights, type: {type: array, items: {type: array, items: {type: array, items: double}}}}
# - {name: bias, type: {type: array, items: {type: array, items: double}}}
# init:
# weights: [ [[ -6.0, -8.0],
# [-25.0, -30.0]],
# [[-12.0, 30.0]] ]
# bias: [ [ 4.0, 50.0],
# [-25.0] ]
# action:
# attr:
# model.neural.simpleLayers:
# - input
# - cell: model
# - params: [{x: double}]
# ret: double
# do: {model.reg.norm.logit: [x]}
#
# path: [[id]]
# """)
# self.assertAlmostEqual(engine.action([0.1, 0.2, 0.3, 0.4, 0.5]), 103.9, places=1)
```
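The weights in `testRegNeural` encode XOR: two logistic hidden units feeding one logistic output. A numpy sketch that reproduces the four asserted corner values:
```python
import numpy as np

def logistic(x):
    return 1.0 / (1.0 + np.exp(-x))

W1 = np.array([[-6.0, -8.0], [-25.0, -30.0]]); b1 = np.array([4.0, 50.0])
W2 = np.array([[-12.0, 30.0]]);                b2 = np.array([-25.0])

def net(x):
    h = logistic(W1 @ x + b1)        # hidden layer with logit activation
    return logistic(W2 @ h + b2)[0]  # output layer, same activation

for x, want in [([0, 0], 0), ([1, 0], 1), ([0, 1], 1), ([1, 1], 0)]:
    assert round(net(np.array(x, dtype=float)), 1) == want  # XOR truth table
```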
#### File: lib/prob/testDist.py
```python
import unittest
import math
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1ProbDist(unittest.TestCase):
def testNormalDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianLL:
- input
- value: {count: 21, mean: 10, variance: 4.0}
type: {type: record, name: Rec, namespace: what.ever, fields: [{name: count, type: double}, {name: mean, type: double}, {name: variance, type: double}]}
''')
self.assertAlmostEqual(engine.action(10.0), -1.612, places=3)
self.assertAlmostEqual(engine.action(12.0), -2.112, places=3)
self.assertAlmostEqual(engine.action(0.0), -14.11, places=2)
self.assertAlmostEqual(engine.action(15.0), -4.737, places=3)
self.assertAlmostEqual(engine.action(8.0), -2.112, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianLL: [input, 10.0, 2.0]
''')
self.assertAlmostEqual(engine.action(10.0), -1.612, places=3)
self.assertAlmostEqual(engine.action(12.0), -2.112, places=3)
self.assertAlmostEqual(engine.action(0.0), -14.11, places=2)
self.assertAlmostEqual(engine.action(15.0), -4.737, places=3)
self.assertAlmostEqual(engine.action(8.0), -2.112, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianCDF:
- input
- value: {count: 21, mean: 10, variance: 4.0}
type: {type: record, name: Rec, namespace: what.ever, fields: [{name: count, type: double}, {name: mean, type: double}, {name: variance, type: double}]}
''')
self.assertAlmostEqual(engine.action(10.0), 0.5, places=3)
self.assertAlmostEqual(engine.action(12.0), 0.8413, places=3)
self.assertAlmostEqual(engine.action(5.0 ), 0.0062, places=3)
self.assertAlmostEqual(engine.action(15.0), 0.9938, places=3)
self.assertAlmostEqual(engine.action(8.0 ), 0.1586, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianCDF: [input, 10.0, 2.0]
''')
self.assertAlmostEqual(engine.action(10.0), 0.5, places=3)
self.assertAlmostEqual(engine.action(12.0), 0.8413, places=3)
self.assertAlmostEqual(engine.action(5.0), 0.0062, places=3)
self.assertAlmostEqual(engine.action(15.0), 0.9938, places=3)
self.assertAlmostEqual(engine.action(8.0), 0.1586, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianQF:
- input
- value: {count: 21, mean: 10, variance: 4.0}
type: {type: record, name: Rec, namespace: what.ever, fields: [{name: count, type: double}, {name: mean, type: double}, {name: variance, type: double}]}
''')
self.assertEqual(engine.action(0.0), float('-inf'))
self.assertAlmostEqual(engine.action(0.01), 5.3473 , places=1)
self.assertAlmostEqual(engine.action(0.4 ), 9.4933 , places=2)
self.assertAlmostEqual(engine.action(0.5 ), 10.0000, places=2)
self.assertAlmostEqual(engine.action(0.99), 14.6527, places=1)
self.assertEqual(engine.action(1.0), float('inf'))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianQF: [input, 10.0, 2.0]
''')
self.assertEqual(engine.action(0.0), float('-inf'))
self.assertAlmostEqual(engine.action(0.01), 5.3473, places=1)
self.assertAlmostEqual(engine.action(0.4 ), 9.4933, places=2)
self.assertAlmostEqual(engine.action(0.5 ), 10.0000, places=2)
self.assertAlmostEqual(engine.action(0.99), 14.6527, places=1)
self.assertEqual(engine.action(1.0), float('inf'))
### Handle the right edge cases ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianLL:
- input
- value: {count: 21, mean: 10, variance: 0.0}
type: {type: record, name: Rec, namespace: what.ever, fields: [{name: count, type: double}, {name: mean, type: double}, {name: variance, type: double}]}
''')
self.assertEqual(engine.action(9.00), float('-inf'))
self.assertEqual(engine.action(10.0), float('inf'))
self.assertEqual(engine.action(11.0), float('-inf'))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianLL: [input, 10.0, 0.0]
''')
self.assertEqual(engine.action(9.00), float('-inf'))
self.assertEqual(engine.action(10.0), float('inf'))
self.assertEqual(engine.action(11.0), float('-inf'))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianCDF:
- input
- value: {count: 21, mean: 10, variance: 0.0}
type: {type: record, name: Rec, namespace: what.ever, fields: [{name: count, type: double}, {name: mean, type: double}, {name: variance, type: double}]}
''')
self.assertEqual(engine.action(9.00), 0.0)
self.assertEqual(engine.action(10.0), 1.0)
self.assertEqual(engine.action(11.0), 1.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianCDF: [input, 10.0, 0.0]
''')
self.assertEqual(engine.action(9.00), 0.0)
self.assertEqual(engine.action(10.0), 1.0)
self.assertEqual(engine.action(11.0), 1.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianQF:
- input
- value: {count: 21, mean: 10, variance: 0.0}
type: {type: record, name: Rec, namespace: what.ever, fields: [{name: count, type: double}, {name: mean, type: double}, {name: variance, type: double}]}
''')
self.assertEqual(engine.action(0.0), float('-inf'))
self.assertEqual(engine.action(1.0), float('inf'))
self.assertEqual(engine.action(0.4), 10.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianQF: [input, 10.0, 0.0]
''')
self.assertEqual(engine.action(0.0), float('-inf'))
self.assertEqual(engine.action(1.0), float('inf'))
self.assertEqual(engine.action(0.4), 10.0)
### raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianLL:
- input
- value: {count: 21, mean: 10, variance: -3.0}
type: {type: record, name: Rec, namespace: what.ever, fields: [{name: count, type: double}, {name: mean, type: double}, {name: variance, type: double}]}
''')
self.assertRaises(PFAException, lambda: engine.action(3.0))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianLL: [input, 10.0, -3.0]
''')
self.assertRaises(PFAException, lambda: engine.action(3.0))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianCDF:
- input
- value: {count: 21, mean: 10, variance: -3.0}
type: {type: record, name: Rec, namespace: what.ever, fields: [{name: count, type: double}, {name: mean, type: double}, {name: variance, type: double}]}
''')
self.assertRaises(PFAException, lambda: engine.action(3.0))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianCDF: [input, 10.0, -3.0]
''')
self.assertRaises(PFAException, lambda: engine.action(3.0))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianQF:
- input
- value: {count: 21, mean: 10, variance: -3.0}
type: {type: record, name: Rec, namespace: what.ever, fields: [{name: count, type: double}, {name: mean, type: double}, {name: variance, type: double}]}
''')
self.assertRaises(PFAException, lambda: engine.action(3.0))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gaussianQF: [input, 10.0, 3.0]
''')
self.assertRaises(PFAException, lambda: engine.action(1.3))
self.assertRaises(PFAException, lambda: engine.action(-0.3))
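# (sketch, not part of the original suite) the three gaussian helpers mirror
# scipy.stats.norm, which is a handy cross-check for the constants above:
# from scipy.stats import norm
# norm.logpdf(12.0, loc=10.0, scale=2.0)  # ~ -2.112,  cf. prob.dist.gaussianLL
# norm.cdf(12.0, loc=10.0, scale=2.0)     # ~ 0.8413,  cf. prob.dist.gaussianCDF
# norm.ppf(0.99, loc=10.0, scale=2.0)     # ~ 14.653,  cf. prob.dist.gaussianQF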
############## EXPONENTIAL DISTRIBUTION #####################
def testExponentialDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialPDF: [input, 1] #[input, rate]
''')
self.assertEqual( engine.action(0.000), 1.0)
self.assertAlmostEqual(engine.action(1.000), 0.368, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.135, places=3)
self.assertAlmostEqual(engine.action(2.500), 0.082, places=3)
self.assertEqual( engine.action(-20.0), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialCDF: [input, 1] #[input, rate]
''')
self.assertEqual( engine.action(0.000), 0.0)
self.assertAlmostEqual(engine.action(1.000), 0.632 , places=3)
self.assertAlmostEqual(engine.action(2.000), 0.865 , places=3)
self.assertAlmostEqual(engine.action(2.500), 0.918 , places=3)
self.assertEqual( engine.action(-20.0), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialQF: [input, 1] #[input, rate]
''')
self.assertEqual( engine.action(0.0), 0.0)
self.assertAlmostEqual(engine.action(0.3), 0.3567, places=3)
self.assertAlmostEqual(engine.action(0.5), 0.6931, places=3)
self.assertAlmostEqual(engine.action(0.8), 1.6094, places=3)
self.assertEqual( engine.action(1.0), float('inf'))
### handle edge cases properly ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialPDF: [input, 0] #[input, rate]
''')
self.assertEqual(engine.action(0.000), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialCDF: [input, 0] #[input, rate]
''')
self.assertEqual(engine.action(0.000), 0.0)
self.assertEqual(engine.action(-1.30), 0.0)
self.assertEqual(engine.action(1.300), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialQF: [input, 0.9] #[input, rate]
''')
self.assertEqual(engine.action(0.0), 0.0)
### raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialPDF: [input, -1] #[input, rate]
''')
self.assertRaises(PFAException, lambda: engine.action(3.0))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialCDF: [input, -1] #[input, rate]
''')
self.assertRaises(PFAException, lambda: engine.action(3.0))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialQF: [input, -1] #[input, rate]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.exponentialQF: [input, 1.5] #[input, rate]
''')
self.assertRaises(PFAException, lambda: engine.action(-1.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## POISSON DISTRIBUTION #####################
def testPoissonDistribution(self):
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.poissonPDF: [input, 4] #[input, lambda]
''')
self.assertAlmostEqual(engine.action(0), 0.0183, places=3)
self.assertAlmostEqual(engine.action(1), 0.0733, places=3)
self.assertAlmostEqual(engine.action(2), 0.1465, places=3)
self.assertAlmostEqual(engine.action(10), 0.0053, places=3)
self.assertEqual( engine.action(-20), 0.0)
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.poissonCDF: [input, 4] #[input, lambda]
''')
self.assertAlmostEqual(engine.action(0), 0.0183, places=3)
self.assertAlmostEqual(engine.action(2), 0.2381, places=3)
self.assertAlmostEqual(engine.action(10), 0.9972, places=3)
self.assertEqual( engine.action(-10), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.poissonQF: [input, 4] #[input, lambda]
''')
self.assertEqual(engine.action(0.0), 0.0)
self.assertEqual(engine.action(0.3), 3.0)
self.assertEqual(engine.action(0.5), 4.0)
self.assertEqual(engine.action(0.8), 6.0)
self.assertEqual(engine.action(1.0), float('inf'))
### it must handle edge cases properly ###
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.poissonPDF: [input, 0] #[input, lambda]
''')
self.assertEqual(engine.action(0), 1.0)
self.assertEqual(engine.action(4), 0.0)
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.poissonCDF: [input, 0] #[input, lambda]
''')
self.assertEqual(engine.action(0), 1.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.poissonQF: [input, 0] #[input, lambda]
''')
self.assertEqual(engine.action(0.0), 0.0)
self.assertEqual(engine.action(0.8), 0.0)
self.assertEqual(engine.action(1.0), 0.0)
### it must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.poissonPDF: [input, -4] #[input, lambda]
''')
self.assertRaises(PFAException, lambda: engine.action(4))
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.poissonCDF: [input, -3] #[input, lambda]
''')
self.assertRaises(PFAException, lambda: engine.action(4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.poissonQF: [input, -2] #[input, lambda]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.poissonQF: [input, 2] #[input, lambda]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## CHI2 DISTRIBUTION #####################
def testChi2Distribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2PDF: [input, 4] #[input, degrees of freedom]
''')
self.assertEqual( engine.action(0.000), 0.0)
self.assertAlmostEqual(engine.action(1.000), 0.1516, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.1839, places=3)
self.assertAlmostEqual(engine.action(2.500), 0.1791, places=3)
self.assertEqual( engine.action(-20.0), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2CDF: [input, 4] #[input, degrees of freedom]
''')
self.assertEqual( engine.action(0.0), 0.0)
self.assertAlmostEqual(engine.action(1.0), 0.0902 , places=3)
self.assertAlmostEqual(engine.action(5.0), 0.7127 , places=3)
self.assertAlmostEqual(engine.action(8.5), 0.9251 , places=3)
self.assertEqual( engine.action(-20.0), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2QF: [input, 4] #[input, degrees of freedom]
''')
self.assertEqual( engine.action(0.0), 0.0)
self.assertAlmostEqual(engine.action(0.3), 2.1947, places=3)
self.assertAlmostEqual(engine.action(0.5), 3.3567, places=3)
self.assertAlmostEqual(engine.action(0.8), 5.9886, places=3)
self.assertEqual( engine.action(1.0), float('inf'))
### it must handle edge cases ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2PDF: [input, 0] #[input, degrees of freedom]
''')
self.assertEqual(engine.action(0.000), float('inf'))
self.assertEqual(engine.action(1.600), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2CDF: [input, 0] #[input, degrees of freedom]
''')
self.assertEqual(engine.action(0.000), 0.0 )
self.assertEqual(engine.action(1.600), 1.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2QF: [input, 0] #[input, degrees of freedom]
''')
self.assertEqual( engine.action(0.4), 0.0)
self.assertEqual( engine.action(1.0), float('inf'))
### it must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2PDF: [input, -1] #[input, degrees of freedom]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2CDF: [input, -3] #[input, degrees of freedom]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2QF: [input, -3] #[input, degrees of freedom]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2QF: [input, 3] #[input, degrees of freedom]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############# TEST F DISTRIBUTION ########################
def testFDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fPDF: [input, 4, 10]
''')
self.assertEqual( engine.action(0.0), 0.0)
self.assertAlmostEqual(engine.action(1.5), 0.2682, places=3)
self.assertAlmostEqual(engine.action(2.0), 0.1568, places=3)
self.assertAlmostEqual(engine.action(10.0), 0.000614, places=4)
self.assertEqual( engine.action(-20.0), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fCDF: [input, 4, 10]
''')
self.assertEqual( engine.action(0.0), 0.0)
self.assertAlmostEqual(engine.action(0.1), 0.0200, places=3)
self.assertAlmostEqual(engine.action(0.9), 0.5006, places=3)
self.assertAlmostEqual(engine.action(4.0), 0.9657, places=3)
self.assertAlmostEqual(engine.action(100.0), 0.9999, places=3)
self.assertEqual( engine.action(-20.0), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fQF: [input, 4, 10]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.001), 0.0208, places=3)
self.assertAlmostEqual(engine.action(0.400), 0.7158, places=3)
self.assertAlmostEqual(engine.action(0.999), 11.282, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### check edge case handling ###
# no real edge cases (doesn't act like a delta anywhere)
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fPDF: [input, 0, 10]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fCDF: [input, 4, 0]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fQF: [input, 0, 10]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fQF: [input, 4, 10]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## GAMMA DISTRIBUTION #####################
def testGammaDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaPDF: [input, 3.0, 3.0] #[input, shape, scale]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(1.000), 0.0133, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.0380, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.0781, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaCDF: [input, 3.0, 3.0] #[input, shape, scale]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(3.000), 0.0803, places=3)
self.assertAlmostEqual(engine.action(6.000), 0.3233, places=3)
self.assertAlmostEqual(engine.action(10.00), 0.6472, places=3)
self.assertAlmostEqual(engine.action(100.0), 1.0000, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaQF: [input, 3.0, 3.0] #[input, shape, scale]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.001), 0.5716, places=3)
self.assertAlmostEqual(engine.action(0.400), 6.8552, places=3)
self.assertAlmostEqual(engine.action(0.999), 33.687, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### it must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaPDF: [input, -1.3, -3.0] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaCDF: [input, -3.0, 1.0] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaQF: [input, -1.0, 3.0] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaQF: [input, 2.0, 3.0] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## BETA DISTRIBUTION #####################
def testBetaDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaPDF: [input, 4, 3] #[input, shape1, shape2]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.100), 0.0486, places=3)
self.assertAlmostEqual(engine.action(0.800), 1.2288, places=3)
self.assertAlmostEqual(engine.action(-20.0), 0.0000, places=3)
self.assertEqual( engine.action(9.000), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaCDF: [input, 4, 3] #[input, shape1, shape2]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.100), 0.0013, places=3)
self.assertAlmostEqual(engine.action(0.900), 0.9842, places=3)
self.assertAlmostEqual(engine.action(4.000), 1.0000, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaQF: [input, 4, 3] #[input, shape1, shape2]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.001), 0.0939, places=3)
self.assertAlmostEqual(engine.action(0.400), 0.5292, places=3)
self.assertAlmostEqual(engine.action(0.999), 0.9621, places=3)
self.assertEqual( engine.action(1.000), 1.0000)
### it must handle edge cases properly ###
## no real edge cases
### it must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaPDF: [input, 0, 3] #[input, shape1, shape2]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaCDF: [input, 4, -3] #[input, shape1, shape2]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaQF: [input, -4, 0] #[input, shape1, shape2]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaQF: [input, 4, 3] #[input, shape1, shape2]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## CAUCHY DISTRIBUTION #####################
def testCauchyDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyPDF: [input, 4, 3] #[input, location, scale]
''')
self.assertAlmostEqual(engine.action(-3.00), 0.0165, places=3)
self.assertAlmostEqual(engine.action(0.000), 0.0382, places=3)
self.assertAlmostEqual(engine.action(0.500), 0.0449, places=3)
self.assertAlmostEqual(engine.action(10.00), 0.0212, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyCDF: [input, 4, 3] #[input, location, scale]
''')
self.assertAlmostEqual(engine.action(0.000), 0.2048, places=3)
self.assertAlmostEqual(engine.action(0.100), 0.2087, places=3)
self.assertAlmostEqual(engine.action(0.900), 0.2448, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.5000, places=3)
self.assertAlmostEqual(engine.action(-20.0), 0.0396, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyQF: [input, 4, 3] #[input, location, scale]
''')
self.assertEqual( engine.action(0.000), float('-inf'))
self.assertAlmostEqual(engine.action(0.001), -950.926, places=1)
self.assertAlmostEqual(engine.action(0.400), 3.0252, places=3)
self.assertAlmostEqual(engine.action(0.999), 958.926, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### must handle edge cases ###
## the Cauchy distribution does NOT become a delta function when scale=0
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyPDF: [input, 4, -3] #[input, location, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyCDF: [input, 4, 0] #[input, location, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyQF: [input, 4, -1] #[input, location, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyQF: [input, 4, 3] #[input, location, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(1.4))
self.assertRaises(PFAException, lambda: engine.action(-.4))
############## LOGNORMAL DISTRIBUTION #####################
def testLogNormalDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalPDF: [input, 2.0, 1.0] #[input, meanlog, sdlog]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(1.000), 0.0539, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.0849, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.0826, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalCDF: [input, 2.0, 1.0] #[input, meanlog, sdlog]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.900), 0.0176, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.2697, places=3)
self.assertAlmostEqual(engine.action(100.0), 0.9954, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalQF: [input, 2.0, 1.0] #[input, meanlog, sdlog]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.001), 0.3361, places=3)
self.assertAlmostEqual(engine.action(0.400), 5.7354, places=3)
self.assertAlmostEqual(engine.action(0.999), 162.43, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalPDF: [input, 2.0, -3.0] #[input, meanlog, sdlog]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalCDF: [input, 2.0, 0.0] #[input, meanlog, sdlog]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalQF: [input, 2.0, -1.0] #[input, meanlog, sdlog]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalQF: [input, 2.0, 1.0] #[input, meanlog, sdlog]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## STUDENT T DISTRIBUTION #####################
def testStudentTDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tPDF: [input, 2] #[input, degrees of freedom, noncentrality]
''')
self.assertAlmostEqual(engine.action(-1.00), 0.1924, places=3)
self.assertAlmostEqual(engine.action(1.000), 0.1924, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.0680, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.0131, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tCDF: [input, 2] #[input, degrees of freedom, noncentrality]
''')
self.assertAlmostEqual(engine.action(-0.90), 0.2315, places=3)
self.assertAlmostEqual(engine.action(0.000), 0.5000, places=3)
self.assertAlmostEqual(engine.action(0.900), 0.7684, places=3)
self.assertAlmostEqual(engine.action(100.0), 0.9999, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tQF: [input, 2] #[input, degrees of freedom, noncentrality]
''')
self.assertEqual( engine.action(0.000), float('-inf'))
self.assertAlmostEqual(engine.action(0.001), -22.33, places=2)
self.assertAlmostEqual(engine.action(0.400), -.2887, places=3)
self.assertAlmostEqual(engine.action(0.999), 22.327, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### must handle exceptions properly ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tPDF: [input, -2] #[input, degrees of freedom, noncentrality]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tCDF: [input, -1] #[input, degrees of freedom, noncentrality]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tQF: [input, 0] #[input, degrees of freedom, noncentrality]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tQF: [input, 2] #[input, degrees of freedom, noncentrality]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## BINOMIAL DISTRIBUTION #####################
def testBinomialDistribution(self):
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.binomialPDF: [input, 4, .4] #[input, size, prob]
''')
self.assertEqual( engine.action(0), 0.1296)
self.assertAlmostEqual(engine.action(1), 0.3456, places=3)
self.assertAlmostEqual(engine.action(2), 0.3456, places=3)
self.assertAlmostEqual(engine.action(10), 0.0000, places=3)
self.assertEqual( engine.action(-20), 0.0000)
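# Worked check: binomialPDF(k, n, p) is the standard binomial mass function
# C(n, k) * p**k * (1-p)**(n-k). With n=4, p=0.4: P(0) = 0.6**4 = 0.1296
# exactly, and P(1) = 4 * 0.4 * 0.6**3 = 0.3456, matching the assertions above.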
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialCDF: [input, 4, .4] #[input, size, prob]
''')
self.assertAlmostEqual(engine.action(0.0), 0.1296, places=3)
self.assertAlmostEqual(engine.action(2.0), 0.8208, places=3)
self.assertAlmostEqual(engine.action(2.5), 0.8208, places=3)
self.assertAlmostEqual(engine.action(10.0), 1.0000, places=3)
self.assertEqual( engine.action(-10.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialQF: [input, 4, .4] #[input, size, prob]
''')
self.assertEqual(engine.action(0.0), 0.0)
self.assertEqual(engine.action(0.3), 1.0)
self.assertEqual(engine.action(0.5), 2.0)
self.assertEqual(engine.action(0.8), 2.0)
self.assertEqual(engine.action(1.0), 4.0)
### must handle edge cases properly ###
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.binomialPDF: [input, 4, 0.0] #[input, size, prob]
''')
self.assertEqual(engine.action(0), 1.0)
self.assertEqual(engine.action(1), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialCDF: [input, 4, 0.0] #[input, size, prob]
''')
self.assertEqual(engine.action(0.0), 1.0000)
self.assertEqual(engine.action(-1.0), 0.0000)
self.assertEqual(engine.action(2.0), 1.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialQF: [input, 4, 0.0] #[input, size, prob]
''')
self.assertEqual(engine.action(0.0), 0.0000)
self.assertEqual(engine.action(0.3), 0.0000)
self.assertEqual(engine.action(1.0), 4.0000)
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.binomialPDF: [input, -4, 0.4] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(5))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialCDF: [input, 4, 1.1] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialQF: [input, 4, 0.4] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## UNIFORM DISTRIBUTION #####################
def testUniformDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.uniformPDF: [input, 1.0, 3.0] #[input, min, max]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(1.000), 0.5000, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.5000, places=3)
self.assertEqual( engine.action(4.000), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.uniformCDF: [input, 1.0, 3.0] #[input, min, max]
''')
self.assertEqual( engine.action(1.000), 0.0000)
self.assertAlmostEqual(engine.action(1.500), 0.2500, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.5000, places=3)
self.assertAlmostEqual(engine.action(2.300), 0.6500, places=3)
self.assertEqual( engine.action(5.000), 1.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.uniformQF: [input, 1.0, 3.0] #[input, min, max]
''')
self.assertEqual( engine.action(0.000), 1.0000)
self.assertAlmostEqual(engine.action(0.001), 1.0020, places=3)
self.assertAlmostEqual(engine.action(0.400), 1.8000, places=3)
self.assertAlmostEqual(engine.action(0.999), 2.9980, places=2)
self.assertEqual( engine.action(1.000), 3.0000)
### must handle exceptions correctly ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.uniformPDF: [input, 5.0, 3.0] #[input, min, max]
''')
self.assertRaises(PFAException, lambda: engine.action(2.0))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.uniformCDF: [input, 4.0, 3.0] #[input, min, max]
''')
self.assertRaises(PFAException, lambda: engine.action(2.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.uniformQF: [input, 3.0, 3.0] #[input, min, max]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.uniformQF: [input, 1.0, 3.0] #[input, min, max]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## GEOMETRIC DISTRIBUTION #####################
def testGeometricDistribution(self):
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.geometricPDF: [input, 0.4] #[input, probability of success]
''')
self.assertEqual( engine.action(0), 0.4000)
self.assertAlmostEqual(engine.action(1), 0.2400, places=3)
self.assertAlmostEqual(engine.action(4), 0.0518, places=3)
self.assertEqual( engine.action(-20), 0.0)
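# Worked check: geometricPDF counts failures before the first success, so
# P(k) = p * (1-p)**k. With p=0.4: P(0) = 0.4, P(1) = 0.4 * 0.6 = 0.24, and
# P(4) = 0.4 * 0.6**4 = 0.05184, matching the assertions above.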
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.geometricCDF: [input, 0.4] #[input, probability of success]
''')
self.assertEqual( engine.action(0.000), 0.4)
self.assertAlmostEqual(engine.action(1.000), 0.640 , places=3)
self.assertAlmostEqual(engine.action(2.000), 0.784 , places=3)
self.assertAlmostEqual(engine.action(2.500), 0.784 , places=3)
self.assertEqual( engine.action(-20.0), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.geometricQF: [input, 0.4] #[input, probability of success]
''')
self.assertEqual(engine.action(0.0), 0.0)
self.assertEqual(engine.action(0.3), 0.0)
self.assertEqual(engine.action(0.5), 1.0)
self.assertEqual(engine.action(0.8), 3.0)
self.assertEqual(engine.action(1.0), float('inf'))
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.geometricPDF: [input, 1.4] #[input, probability of success]
''')
self.assertRaises(PFAException, lambda: engine.action(2))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.geometricCDF: [input, -0.4] #[input, probability of success]
''')
self.assertRaises(PFAException, lambda: engine.action(2.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.geometricQF: [input, -0.4] #[input, probability of success]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.geometricQF: [input, 0.4] #[input, probability of success]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## HYPERGEOMETRIC DISTRIBUTION #####################
def testHypergeometricDistribution(self):
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.hypergeometricPDF: [input, 10, 5, 3] #[input (number of white balls drawn), n white balls, n black balls, n balls drawn]
''')
self.assertAlmostEqual(engine.action(0), 0.0219, places=3)
self.assertAlmostEqual(engine.action(1), 0.2198, places=3)
self.assertAlmostEqual(engine.action(4), 0.0000, places=3)
self.assertEqual( engine.action(-20), 0.0000)
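# Worked check: with m=10 white balls, n=5 black balls, and k=3 drawn,
# P(X=x) = C(m, x) * C(n, k-x) / C(m+n, k). For example,
# P(X=0) = C(5, 3) / C(15, 3) = 10/455 ~ 0.0220 and
# P(X=1) = C(10, 1) * C(5, 2) / C(15, 3) = 100/455 ~ 0.2198, as asserted above.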
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.hypergeometricCDF: [input, 10, 5, 3] #[input (number of white balls drawn), n white balls, n black balls, n balls drawn]
''')
self.assertAlmostEqual(engine.action(0), 0.0219, places=3)
self.assertAlmostEqual(engine.action(1), 0.2418, places=3)
self.assertAlmostEqual(engine.action(2), 0.7363, places=3)
self.assertAlmostEqual(engine.action(2), 0.7363, places=3)
self.assertEqual( engine.action(-20), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.hypergeometricQF: [input, 10, 5, 3] #[input (number of white balls drawn), n white balls, n black balls, n balls drawn]
''')
self.assertEqual(engine.action(0.0), 0.0)
self.assertEqual(engine.action(0.3), 2.0)
self.assertEqual(engine.action(0.5), 2.0)
self.assertEqual(engine.action(0.8), 3.0)
self.assertEqual(engine.action(0.99), 3.0)
self.assertEqual(engine.action(1.0), 3.0)
### must raise the right exceptions ###
# 1. you can't draw more balls than are in the urn
# 2. you can't draw more white balls than are in the urn (this happens with probability zero)
# 3. in QF: you can't input probabilities greater than 1 or less than 0
# check 1
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.hypergeometricPDF: [input, 4, 4, 20] #[input (number of white balls drawn), n white balls, n black balls, n balls drawn]
''')
self.assertRaises(PFAException, lambda: engine.action(3))
# check 2
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.hypergeometricCDF: [input, 10, 5, 3] #[input (number of white balls drawn), n white balls, n black balls, n balls drawn]
''')
self.assertEqual(engine.action(2000), 0.0)
# check 3
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.hypergeometricQF: [input, 10, 5, 3] #[input (number of white balls drawn), n white balls, n black balls, n balls drawn]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## NEGATIVE BINOMIAL DISTRIBUTION #####################
def testNegativeBinomialDistribution(self):
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.negativeBinomialPDF: [input, 5, .7] #[input, size, probability ]
''')
self.assertAlmostEqual(engine.action(0), 0.1681, places=3)
self.assertAlmostEqual(engine.action(3), 0.1588, places=3)
self.assertAlmostEqual(engine.action(6), 0.0257, places=3)
self.assertEqual( engine.action(-20), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.negativeBinomialCDF: [input, 5, .7] #[input, size, probability ]
''')
self.assertAlmostEqual(engine.action(0.000), 0.1681, places=3)
self.assertAlmostEqual(engine.action(1.000), 0.4202, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.6471, places=3)
self.assertAlmostEqual(engine.action(2.500), 0.6471, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.negativeBinomialQF: [input, 5, .7] #[input, size, probability ]
''')
self.assertEqual(engine.action(0.0), 0.0)
self.assertEqual(engine.action(0.3), 1.0)
self.assertEqual(engine.action(0.5), 2.0)
self.assertEqual(engine.action(0.8), 3.0)
self.assertEqual(engine.action(1.0), float('inf'))
### must handle edge cases properly ###
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.negativeBinomialPDF: [input, 0, .7] #[input, size, prob]
''')
self.assertEqual(engine.action(0), 1.0)
self.assertEqual(engine.action(3), 0.0)
self.assertEqual(engine.action(6), 0.0)
self.assertEqual(engine.action(-20), 0.0)
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.negativeBinomialPDF: [input, 4, 0.0] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(5))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.negativeBinomialCDF: [input, 4, 1.1] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.negativeBinomialQF: [input, 0, -0.4] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.negativeBinomialQF: [input, 4, 0.4] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## WEIBULL DISTRIBUTION #####################
def testWeibullDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.weibullPDF: [input, 2, 4] #[input, shape, scale]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.300), 0.0373, places=3)
self.assertAlmostEqual(engine.action(5.000), 0.1310, places=3)
self.assertAlmostEqual(engine.action(-20.0), 0.0000, places=3)
self.assertAlmostEqual(engine.action(9.000), 0.0071, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.weibullCDF: [input, 2, 4] #[input, shape, scale]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.100), 0.0006, places=3)
self.assertAlmostEqual(engine.action(0.900), 0.0494, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.6321, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.weibullQF: [input, 2, 4] #[input, shape, scale]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.001), 0.1265, places=3)
self.assertAlmostEqual(engine.action(0.400), 2.8589, places=3)
self.assertAlmostEqual(engine.action(0.999), 10.513, places=3)
self.assertEqual( engine.action(1.000), float('inf'))
### it must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.weibullPDF: [input, -2, 4] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(1.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.weibullCDF: [input, 2, 0] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(1.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.weibullQF: [input, 0, 4] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(1.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.weibullQF: [input, 2, 4] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
```
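The reference values in these distribution tests can be reproduced independently of Titus. A minimal sketch (assuming SciPy is installed; it is not a dependency of the tests themselves, and the tolerances mirror the `places=3` assertions above):

```python
# Cross-check a few expected values from the tests above with scipy.stats.
from scipy import stats

# F distribution with dfn=4, dfd=10 (prob.dist.fPDF / fCDF)
assert abs(stats.f.pdf(1.5, 4, 10) - 0.2682) < 5e-4
assert abs(stats.f.cdf(4.0, 4, 10) - 0.9657) < 5e-4

# gamma with shape=3, scale=3 (prob.dist.gammaCDF); CDF(6) = 1 - 5*exp(-2)
assert abs(stats.gamma.cdf(6.0, 3.0, scale=3.0) - 0.3233) < 5e-4

# Cauchy with location=4, scale=3 (prob.dist.cauchyQF)
assert abs(stats.cauchy.ppf(0.4, loc=4, scale=3) - 3.0252) < 5e-4
```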
#### File: test/lib/testBytes.py
```python
import math
import struct
import unittest
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1Bytes(unittest.TestCase):
#################################################################### basic access
def testGetLength(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: int
action:
bytes.len: input
''')
self.assertEqual(engine.action(struct.pack("bbbbb", 104, 101, 108, 108, 111)), 5)
def testGetSubseq(self):
engine, = PFAEngine.fromYaml('''
input: int
output: string
action:
{bytes.decodeAscii: {bytes.subseq: [{bytes.encodeAscii: {string: ABCDEFGHIJKLMNOPQRSTUVWXYZ}}, 5, input]}}
''')
self.assertEqual(engine.action(10), "FGHIJ")
self.assertEqual(engine.action(-10), "FGHIJKLMNOP")
self.assertEqual(engine.action(0), "")
self.assertEqual(engine.action(1), "")
self.assertEqual(engine.action(100), "FGHIJKLMNOPQRSTUVWXYZ")
def testGetSubseqTo(self):
engine, = PFAEngine.fromYaml('''
input: int
output: string
action:
{bytes.decodeAscii: {bytes.subseqto: [{bytes.encodeAscii: {string: ABCDEFGHIJKLMNOPQRSTUVWXYZ}}, 5, input, {bytes.encodeAscii: {string: ...}}]}}
''')
self.assertEqual(engine.action(10), "ABCDE...KLMNOPQRSTUVWXYZ")
self.assertEqual(engine.action(-10), "ABCDE...QRSTUVWXYZ")
self.assertEqual(engine.action(0), "ABCDE...FGHIJKLMNOPQRSTUVWXYZ")
self.assertEqual(engine.action(1), "ABCDE...FGHIJKLMNOPQRSTUVWXYZ")
self.assertEqual(engine.action(100), "ABCDE...")
#################################################################### encoding testers
def testCheckAscii(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: boolean
action:
bytes.isAscii: input
''')
self.assertEqual(engine.action(struct.pack("bbbbb", 104, 101, 108, 108, 111)), True)
self.assertEqual(engine.action(struct.pack("bbbbb", 104, 101, 108, -127, 111)), False)
def testCheckLatin1(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: boolean
action:
bytes.isLatin1: input
''')
self.assertEqual(engine.action(struct.pack("bbbbb", 104, 101, 108, 108, 111)), True)
def testCheckUtf8(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: boolean
action:
bytes.isUtf8: input
''')
self.assertEqual(engine.action(struct.pack("bbbbb", 104, 101, 108, 108, 111)), True)
def testCheckUtf16(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: boolean
action:
bytes.isUtf16: input
''')
self.assertEqual(engine.action(struct.pack("bbbbbbbbbbbb", -1, -2, 104, 0, 101, 0, 108, 0, 108, 0, 111, 0)), True)
def testCheckUtf16be(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: boolean
action:
bytes.isUtf16be: input
''')
self.assertEqual(engine.action(struct.pack("bbbbbbbbbb", 0, 104, 0, 101, 0, 108, 0, 108, 0, 111)), True)
def testCheckUtf16le(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: boolean
action:
bytes.isUtf16le: input
''')
self.assertEqual(engine.action(struct.pack("bbbbbbbbbb", 104, 0, 101, 0, 108, 0, 108, 0, 111, 0)), True)
#################################################################### decoders
def testDecodeAscii(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: string
action:
bytes.decodeAscii: input
''')
self.assertEqual(engine.action(struct.pack("bbbbb", 104, 101, 108, 108, 111)), "hello")
self.assertRaises(PFARuntimeException, lambda: engine.action(struct.pack("bbbbb", 104, 101, 108, -127, 111)))
def testDecodeLatin1(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: string
action:
bytes.decodeLatin1: input
''')
self.assertEqual(engine.action(struct.pack("bbbbb", 104, 101, 108, 108, 111)), "hello")
def testDecodeUtf8(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: string
action:
bytes.decodeUtf8: input
''')
self.assertEqual(engine.action(struct.pack("bbbbb", 104, 101, 108, 108, 111)), "hello")
def testDecodeUtf16(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: string
action:
bytes.decodeUtf16: input
''')
self.assertEqual(engine.action(struct.pack("bbbbbbbbbbbb", -1, -2, 104, 0, 101, 0, 108, 0, 108, 0, 111, 0)), "hello")
def testDecodeUtf16be(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: string
action:
bytes.decodeUtf16be: input
''')
self.assertEqual(engine.action(struct.pack("bbbbbbbbbb", 0, 104, 0, 101, 0, 108, 0, 108, 0, 111)), "hello")
def testDecodeUtf16le(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: string
action:
bytes.decodeUtf16le: input
''')
self.assertEqual(engine.action(struct.pack("bbbbbbbbbb", 104, 0, 101, 0, 108, 0, 108, 0, 111, 0)), "hello")
#################################################################### encoders
def testEncodeAscii(self):
engine, = PFAEngine.fromYaml('''
input: string
output: bytes
action:
bytes.encodeAscii: input
''')
self.assertEqual(engine.action("hello"), struct.pack("bbbbb", 104, 101, 108, 108, 111))
self.assertRaises(PFARuntimeException, lambda: engine.action("hel\x81o"))
def testEncodeLatin1(self):
engine, = PFAEngine.fromYaml('''
input: string
output: bytes
action:
bytes.encodeLatin1: input
''')
self.assertEqual(engine.action("hello"), struct.pack("bbbbb", 104, 101, 108, 108, 111))
def testEncodeUtf8(self):
engine, = PFAEngine.fromYaml('''
input: string
output: bytes
action:
bytes.encodeUtf8: input
''')
self.assertEqual(engine.action("hello"), struct.pack("bbbbb", 104, 101, 108, 108, 111))
def testEncodeUtf16(self):
engine, = PFAEngine.fromYaml('''
input: string
output: bytes
action:
bytes.encodeUtf16: input
''')
self.assertEqual(engine.action("hello"), struct.pack("bbbbbbbbbbbb", -1, -2, 104, 0, 101, 0, 108, 0, 108, 0, 111, 0))
def testEncodeUtf16be(self):
engine, = PFAEngine.fromYaml('''
input: string
output: bytes
action:
bytes.encodeUtf16be: input
''')
self.assertEqual(engine.action("hello"), struct.pack("bbbbbbbbbb", 0, 104, 0, 101, 0, 108, 0, 108, 0, 111))
def testEncodeUtf16le(self):
engine, = PFAEngine.fromYaml('''
input: string
output: bytes
action:
bytes.encodeUtf16le: input
''')
self.assertEqual(engine.action("hello"), struct.pack("bbbbbbbbbb", 104, 0, 101, 0, 108, 0, 108, 0, 111, 0))
#################################################################### encoders
def testConvertToBase64(self):
engine, = PFAEngine.fromYaml('''
input: bytes
output: string
action:
bytes.toBase64: input
''')
self.assertEqual(engine.action(struct.pack("bbbbb", 0, 127, 64, 38, 22)), "AH9AJhY=")
def testConvertFromBase64(self):
engine, = PFAEngine.fromYaml('''
input: string
output: bytes
action:
bytes.fromBase64: input
''')
self.assertEqual(engine.action("AH9AJhY="), struct.pack("bbbbb", 0, 127, 64, 38, 22))
```
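A note on the fixtures above: `struct.pack` with format `b` packs *signed* bytes, so the pair -1, -2 is 0xFF 0xFE, the UTF-16 little-endian byte-order mark. A standalone sketch in plain Python 2 (no Titus required):

```python
import struct

# "hello" packed as five signed bytes
assert struct.pack("bbbbb", 104, 101, 108, 108, 111) == "hello"

# -1, -2 pack to 0xFF 0xFE, the little-endian UTF-16 byte-order mark,
# which is why the UTF-16 fixtures begin with that pair
bom = struct.pack("bb", -1, -2)
assert bom == "\xff\xfe"
assert (bom + "h\x00e\x00l\x00l\x00o\x00").decode("utf-16") == u"hello"
```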
#### File: test/lib/testEnum.py
```python
import math
import unittest
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1Enum(unittest.TestCase):
def testToString(self):
engine, = PFAEngine.fromYaml('''
input: {type: enum, name: Test, symbols: ["A", "B", "C"]}
output: string
action:
enum.toString: input
''')
self.assertEqual(engine.action("A"), "A")
self.assertEqual(engine.action("B"), "B")
self.assertEqual(engine.action("C"), "C")
self.assertRaises(AvroException, lambda: engine.action("D"))
def testToInt(self):
engine, = PFAEngine.fromYaml('''
input: {type: enum, name: Test, symbols: ["A", "B", "C"]}
output: int
action:
enum.toInt: input
''')
self.assertEqual(engine.action("A"), 0)
self.assertEqual(engine.action("B"), 1)
self.assertEqual(engine.action("C"), 2)
def testNumSymbols(self):
engine, = PFAEngine.fromYaml('''
input: {type: enum, name: Test, symbols: ["A", "B", "C"]}
output: int
action:
enum.numSymbols: input
''')
self.assertEqual(engine.action("A"), 3)
self.assertEqual(engine.action("B"), 3)
self.assertEqual(engine.action("C"), 3)
```
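As the assertions show, `enum.toInt` maps a symbol to its zero-based position in the schema's symbol list and `enum.numSymbols` returns the list's length; in plain Python terms:

```python
symbols = ["A", "B", "C"]            # the schema's symbol list
assert symbols.index("B") == 1       # what enum.toInt("B") returns here
assert len(symbols) == 3             # what enum.numSymbols returns for any input
```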
#### File: test/lib/testRand.py
```python
import unittest
import math
import struct
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1Rand(unittest.TestCase):
def testInt(self):
engine1, = PFAEngine.fromYaml('''
input: "null"
output: int
randseed: 12345
action: {rand.int: []}
''')
self.assertEqual(engine1.action(None), -358114921)
self.assertEqual(engine1.action(None), -2103807398)
self.assertEqual(engine1.action(None), 1396751321)
engine2, = PFAEngine.fromYaml('''
input: "null"
output: int
randseed: 12345
action: {rand.int: [5, 10]}
''')
self.assertEqual(engine2.action(None), 7)
self.assertEqual(engine2.action(None), 5)
self.assertEqual(engine2.action(None), 9)
def testLong(self):
engine1, = PFAEngine.fromYaml('''
input: "null"
output: long
randseed: 12345
action: {rand.long: []}
''')
self.assertEqual(engine1.action(None), 4292285838037326215)
self.assertEqual(engine1.action(None), 6551146165133617474)
self.assertEqual(engine1.action(None), -5650950641291792112)
engine2, = PFAEngine.fromYaml('''
input: "null"
output: long
randseed: 12345
action: {rand.long: [5, 10]}
''')
self.assertEqual(engine2.action(None), 7)
self.assertEqual(engine2.action(None), 5)
self.assertEqual(engine2.action(None), 9)
def testFloat(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: float
randseed: 12345
action: {rand.float: [5, 10]}
''')
self.assertAlmostEqual(engine.action(None), 7.08309936273, places=5)
self.assertAlmostEqual(engine.action(None), 5.05084584729, places=5)
self.assertAlmostEqual(engine.action(None), 9.12603254627, places=5)
def testDouble(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: double
randseed: 12345
action: {rand.double: [5, 10]}
''')
self.assertAlmostEqual(engine.action(None), 7.08309936273, places=5)
self.assertAlmostEqual(engine.action(None), 5.05084584729, places=5)
self.assertAlmostEqual(engine.action(None), 9.12603254627, places=5)
def testChoice(self):
engine, = PFAEngine.fromYaml('''
input:
type: array
items: string
output: string
randseed: 12345
action: {rand.choice: input}
''')
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), "three")
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), "one")
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), "five")
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), "two")
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), "two")
def testChoicesWithReplacement(self):
engine, = PFAEngine.fromYaml('''
input:
type: array
items: string
output:
type: array
items: string
randseed: 12345
action: {rand.choices: [3, input]}
''')
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["three", "one", "five"])
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["two", "two", "one"])
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["three", "one", "one"])
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["three", "three", "one"])
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["three", "two", "five"])
def testSampleWithoutReplacement(self):
engine, = PFAEngine.fromYaml('''
input:
type: array
items: string
output:
type: array
items: string
randseed: 12345
action: {rand.sample: [3, input]}
''')
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["three", "one", "five"])
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["two", "five", "one"])
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["three", "one", "four"])
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["three", "five", "one"])
self.assertEqual(engine.action(["one", "two", "three", "four", "five"]), ["three", "two", "five"])
def testHistogram(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: int
randseed: 12345
action: {rand.histogram: {value: [3.3, 2.2, 5.5, 0.0, 1.1, 8.8], type: {type: array, items: double}}}
''')
results = [engine.action(None) for i in xrange(0, 10000)]
self.assertAlmostEqual(results.count(0) / 10000.0, 0.15789473684210525, places=2)
self.assertAlmostEqual(results.count(1) / 10000.0, 0.10526315789473686, places=2)
self.assertAlmostEqual(results.count(2) / 10000.0, 0.26315789473684215, places=2)
self.assertAlmostEqual(results.count(3) / 10000.0, 0.0, places=2)
self.assertAlmostEqual(results.count(4) / 10000.0, 0.05263157894736843, places=2)
self.assertAlmostEqual(results.count(5) / 10000.0, 0.42105263157894746, places=2)
self.assertAlmostEqual(results.count(6) / 10000.0, 0.0, places=2)
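# The expected fractions are each weight over the total weight
# (3.3 + 2.2 + 5.5 + 0.0 + 1.1 + 8.8 = 20.9): e.g. 3.3/20.9 ~ 0.1579 for
# bin 0 and 8.8/20.9 ~ 0.4211 for bin 5; bins with zero weight (3) and
# out-of-range bins (6) never occur.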
def testHistogram2(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: HistogramItem
randseed: 12345
cells:
hist:
type:
type: array
items:
type: record
name: HistogramItem
fields:
- {name: label, type: string}
- {name: prob, type: double}
init:
- {label: A, prob: 3.3}
- {label: B, prob: 2.2}
- {label: C, prob: 5.5}
- {label: D, prob: 0.0}
- {label: E, prob: 1.1}
- {label: F, prob: 8.8}
action: {rand.histogram: {cell: hist}}
''')
results = [engine.action(None) for i in xrange(0, 10000)]
self.assertAlmostEqual(sum(1 for x in results if x["label"] == "A") / 10000.0, 0.15789473684210525, places=2)
self.assertAlmostEqual(sum(1 for x in results if x["label"] == "B") / 10000.0, 0.10526315789473686, places=2)
self.assertAlmostEqual(sum(1 for x in results if x["label"] == "C") / 10000.0, 0.26315789473684215, places=2)
self.assertAlmostEqual(sum(1 for x in results if x["label"] == "D") / 10000.0, 0.0, places=2)
self.assertAlmostEqual(sum(1 for x in results if x["label"] == "E") / 10000.0, 0.05263157894736843, places=2)
self.assertAlmostEqual(sum(1 for x in results if x["label"] == "F") / 10000.0, 0.42105263157894746, places=2)
self.assertAlmostEqual(sum(1 for x in results if x["label"] == "G") / 10000.0, 0.0, places=2)
def testString(self):
engine1, = PFAEngine.fromYaml('''
input: "null"
output: string
randseed: 12345
action: {rand.string: [10]}
''')
self.assertEqual(engine1.action(None), u"姾ȳ눿䂂侔⧕穂⋭嶄")
self.assertEqual(engine1.action(None), u"祩▩睿䲩컲Ꮉ퍣夅泚 ")
self.assertEqual(engine1.action(None), u"魍⤉䧇ԕ䥖탺퍬ꃒÀ쬘")
engine2, = PFAEngine.fromYaml('''
input: "null"
output: string
randseed: 12345
action: {rand.string: [10, {string: "abcdefghijklmnopqrstuvwxyz0123456789"}]}
''')
self.assertEqual(engine2.action(None), "oa3kngufep")
self.assertEqual(engine2.action(None), "ugtm8d9osf")
self.assertEqual(engine2.action(None), "zgmam890a7")
engine3, = PFAEngine.fromYaml('''
input: "null"
output: string
randseed: 12345
action: {rand.string: [10, 33, 127]}
''')
self.assertEqual(engine3.action(None), "H!n=C3V0,I")
self.assertEqual(engine3.action(None), "U1UB{)|GP.")
self.assertEqual(engine3.action(None), "d2A#@{}f!y")
def testBytes(self):
engine1, = PFAEngine.fromYaml('''
input: "null"
output: bytes
randseed: 12345
action: {rand.bytes: [10]}
''')
self.assertEqual(engine1.action(None), "j\x02\xd3L^1\x90)\x1fn")
self.assertEqual(engine1.action(None), "\x8f,\x8dZ\xf5\x17\xfai\x81%")
self.assertEqual(engine1.action(None), "\xb80W\x06V\xf7\xfa\xbe\x00\xf0")
engine2, = PFAEngine.fromYaml('''
input: "null"
output: bytes
randseed: 12345
action: {rand.bytes: [10, {base64: "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXowMTIzNDU2Nzg5"}]}
''')
self.assertEqual(engine2.action(None), "oa3kngufep")
self.assertEqual(engine2.action(None), "ugtm8d9osf")
self.assertEqual(engine2.action(None), "zgmam890a7")
engine3, = PFAEngine.fromYaml('''
input: "null"
output: bytes
randseed: 12345
action: {rand.bytes: [10, 33, 127]}
''')
self.assertEqual(engine3.action(None), "H!n=C3V0,I")
self.assertEqual(engine3.action(None), "U1UB{)|GP.")
self.assertEqual(engine3.action(None), "d2A#@{}f!y")
def testUUID(self):
engine1, = PFAEngine.fromYaml('''
input: "null"
output: string
randseed: 12345
action: {rand.uuid4: []}
''')
self.assertEqual(engine1.action(None), "6aa79987-bb91-4029-8d1f-cd8778e7d340bbcd")
self.assertEqual(engine1.action(None), "4c73a942-daea-45e5-8ee8-452ec40a3193ca54")
self.assertEqual(engine1.action(None), "90e5e945-6fac-4296-85f8-dfc9e3b11fcff454")
engine2, = PFAEngine.fromYaml('''
input: "null"
output: string
action: {s.substr: [{rand.uuid4: []}, 14, 15]}
''')
for i in xrange(1000):
self.assertEqual(engine2.action(None), "4")
engine3, = PFAEngine.fromYaml('''
input: "null"
output: string
action: {s.substr: [{rand.uuid4: []}, 19, 20]}
''')
for i in xrange(1000):
self.assertEqual(engine3.action(None), "8")
def testGaussian(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: double
randseed: 12345
action: {rand.gaussian: [10, 2]}
''')
self.assertAlmostEqual(engine.action(None), 9.75239840882, places=5)
self.assertAlmostEqual(engine.action(None), 10.143049927, places=5)
self.assertAlmostEqual(engine.action(None), 10.7667383886, places=5)
if __name__ == "__main__":
unittest.main()
```
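The two loops in `testUUID` pin down the fixed nibbles of a version-4 UUID string: character 14 is the version nibble (always `4`) and character 19 is the variant nibble, which RFC 4122 allows to be `8`, `9`, `a`, or `b` (the assertions suggest this implementation always emits `8`). The same invariants hold for Python's own generator:

```python
import uuid

for _ in range(1000):
    u = str(uuid.uuid4())
    assert u[14] == "4"        # version nibble is always 4
    assert u[19] in "89ab"     # variant nibble per RFC 4122
```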
#### File: test/prettypfa/testTutorial.py
```python
import json
import unittest
import titus.prettypfa
from titus.genpy import PFAEngine
from titus.errors import PFAInitializationException, PFAUserException
# All of the tutorial examples as PrettyPFA
class TestTutorial(unittest.TestCase):
def fallbackCheck(self, result, expectedResult):
try:
self.assertEqual(result, expectedResult)
except AssertionError:
if isinstance(result, float) and isinstance(expectedResult, float):
self.assertAlmostEqual(result, expectedResult, places=5)
elif isinstance(result, (list, tuple)) and isinstance(expectedResult, (list, tuple)) and len(result) == len(expectedResult):
for x, y in zip(result, expectedResult):
self.fallbackCheck(x, y)
else:
raise
def check(self, inputs, pfa, outputs, allowedExceptions=()):
inputs = map(json.loads, inputs.strip().split("\n"))
outputs = map(json.loads, outputs.strip().split("\n"))
engine, = titus.prettypfa.engine(pfa)
if engine.config.method == "emit":
outputs.reverse()
engine.emit = lambda result: self.assertEqual(result, outputs.pop())
for datum in inputs:
try:
engine.action(datum)
except Exception as err:
if isinstance(err, allowedExceptions):
pass
else:
raise
else:
index = 0
for datum in inputs:
try:
result = engine.action(datum)
except Exception as err:
if isinstance(err, allowedExceptions):
pass
else:
raise
else:
self.fallbackCheck(result, outputs[index])
index += 1
def testTutorial1_1(self):
self.check('''
1
2
3
''', r'''
input: double
output: double
action: input + 100
''', '''
101.0
102.0
103.0''')
def testTutorial1_2(self):
self.check('''
1
2
3
''', r'''
input: double
output: double
action: m.round(m.sin(input + 100) * 100)
''', '''
45.0
99.0
62.0''')
def testTutorial1_3(self):
self.check('''
1
2
3
''', r'''
input: double
output: double
action:
m.round(m.sin(input + 100) * 100);
''', '''
45.0
99.0
62.0''')
def testTutorial1_4(self):
self.check('''
1
2
3
4
5
''', r'''
input: double
output: double
action: m.sqrt(input)
''', '''
1.0
1.4142135623730951
1.7320508075688772
2.0
2.23606797749979
''')
def testTutorial1_5(self):
self.check('''
1
2
3
4
5
''', r'''
input: double
output: double
method: emit
action:
if (input % 2 == 0)
emit(input / 2)
''', '''
1.0
2.0
''')
def testTutorial1_6(self):
self.check('''
1
2
3
4
5
''', r'''
input: double
output: double
method: fold
zero: 0
action: input + tally
merge: tallyOne + tallyTwo
''', '''
1.0
3.0
6.0
10.0
15.0
''')
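# With method: fold, the engine threads a running tally through the stream:
# tally starts at `zero`, each action's result becomes the new tally, and
# `merge` combines partial tallies. Hence the outputs above are the
# cumulative sums 1, 3, 6, 10, 15.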
def testTutorial1_7(self):
self.check('''
1
2
3
4
5
''', r'''
input: int
output: string
method: fold
zero: ""
action:
s.concat(tally, s.int(input))
merge:
s.concat(tallyOne, tallyTwo)
''', '''
"1"
"12"
"123"
"1234"
"12345"
''')
def testTutorial2_2(self):
self.check('''
{"name": "Sun", "x": 0.0, "y": 0.0, "z": 0.0, "spec": "G2 V", "planets": true, "mag": {"double": -26.72}}
{"name": "Proxima Centauri", "x": 2.94, "y": -3.05, "z": -0.14, "spec": "M5 Ve", "planets": false, "mag": {"double": 11.05}}
{"name": "Alpha Centauri A", "x": 3.13, "y": -3.05, "z": -0.05, "spec": "G2 V", "planets": false, "mag": {"double": 0.01}}
{"name": "Alpha Centauri B", "x": 3.13, "y": -3.05, "z": -0.05, "spec": "K0 V", "planets": false, "mag": {"double": 1.34}}
{"name": "Alpha Centauri Bb", "x": 3.13, "y": -3.05, "z": -0.05, "spec": "", "planets": false, "mag": null}
{"name": "<NAME>", "x": 4.97, "y": 2.99, "z": 1.45, "spec": "M3.5 V", "planets": false, "mag": {"double": 9.57}}
{"name": "<NAME> A", "x": 1.72, "y": -6.32, "z": 0.61, "spec": "L7.5", "planets": false, "mag": null}
{"name": "<NAME>", "x": 1.72, "y": -6.32, "z": 0.61, "spec": "T0.5", "planets": false, "mag": null}
{"name": "<NAME>", "x": -1.90, "y": -3.90, "z": 6.46, "spec": "M5.5 V", "planets": false, "mag": {"double": 13.53}}
{"name": "Lalande 21185", "x": -3.44, "y": -0.31, "z": 7.54, "spec": "M2 V", "planets": false, "mag": {"double": 7.47}}
{"name": "<NAME>", "x": -5.76, "y": -6.22, "z": -1.33, "spec": "A1 V", "planets": false, "mag": {"double": -1.43}}
{"name": "<NAME>", "x": -5.76, "y": -6.22, "z": -1.33, "spec": "DA2", "planets": false, "mag": {"double": 8.44}}
{"name": "Luyten 726-8 A", "x": -2.15, "y": 0.17, "z": -8.46, "spec": "M5.5 V", "planets": false, "mag": {"double": 12.61}}
{"name": "Luyten 726-8 B", "x": -2.15, "y": 0.17, "z": -8.46, "spec": "M6 V", "planets": false, "mag": {"double": 13.06}}
{"name": "WISEP J154151.66-225025.2", "x": 8.17, "y": -1.95, "z": 3.96, "spec": "Y0.5", "planets": false, "mag": null}
{"name": "<NAME>", "x": 9.33, "y": 1.87, "z": -1.73, "spec": "M3.5 Ve", "planets": false, "mag": {"double": 10.44}}
{"name": "WISEPC J205628.90+145953.3", "x": 4.34, "y": 8.16, "z": -3.22, "spec": "Y0", "planets": false, "mag": null}
{"name": "<NAME>", "x": -3.37, "y": 9.27, "z": -3.00, "spec": "M5.5 V", "planets": false, "mag": {"double": 12.29}}
{"name": "<NAME>", "x": -6.74, "y": -1.91, "z": -7.79, "spec": "K2 V", "planets": false, "mag": {"double": 3.73}}
{"name": "<NAME> b", "x": -6.74, "y": -1.91, "z": -7.79, "spec": "", "planets": true, "mag": null}
{"name": "<NAME> c", "x": -6.75, "y": -1.91, "z": -7.80, "spec": "", "planets": false, "mag": null}
''', r'''
input: record(name: string,
x: double,
y: double,
z: double,
spec: string,
planets: boolean,
mag: union(double, null))
output: double
action: m.sqrt(a.sum(new(array(double), input.x**2, input.y**2, input.z**2)))
''', '''
0.0
4.23859646581271
4.37057204493874
4.37057204493874
4.37057204493874
5.9785867895347975
6.578214043340336
6.578214043340336
7.781490859726046
8.293449222127064
8.581078020854955
8.581078020854955
8.73057844589922
8.73057844589922
9.286172516166173
9.671540725241249
9.787216151695027
10.30969446685982
10.476631137918334
10.476631137918334
10.490500464706152
''')
def testTutorial2_3(self):
self.check('''
{"name": "Sun", "x": 0.0, "y": 0.0, "z": 0.0, "spec": "G2 V", "planets": true, "mag": {"double": -26.72}}
{"name": "Proxima Centauri", "x": 2.94, "y": -3.05, "z": -0.14, "spec": "M5 Ve", "planets": false, "mag": {"double": 11.05}}
{"name": "Alpha Centauri A", "x": 3.13, "y": -3.05, "z": -0.05, "spec": "G2 V", "planets": false, "mag": {"double": 0.01}}
{"name": "Alpha Centauri B", "x": 3.13, "y": -3.05, "z": -0.05, "spec": "K0 V", "planets": false, "mag": {"double": 1.34}}
{"name": "Alpha Centauri Bb", "x": 3.13, "y": -3.05, "z": -0.05, "spec": "", "planets": false, "mag": null}
{"name": "<NAME>", "x": 4.97, "y": 2.99, "z": 1.45, "spec": "M3.5 V", "planets": false, "mag": {"double": 9.57}}
{"name": "<NAME>", "x": 1.72, "y": -6.32, "z": 0.61, "spec": "L7.5", "planets": false, "mag": null}
{"name": "<NAME>", "x": 1.72, "y": -6.32, "z": 0.61, "spec": "T0.5", "planets": false, "mag": null}
''', r'''
input: record(name: string,
x: double,
y: double,
z: double,
spec: string,
planets: boolean,
mag: union(double, null))
output: double
method: emit
action:
cast (input.mag) {
as(magDouble: double) emit(magDouble)
as(magNull: null) null
}
''', '''
-26.72
11.05
0.01
1.34
9.57
''')
def testTutorial2_4(self):
self.check('''
1
2
3
4
5
''', r'''
input: double
output: double
action:
var x = input;
var y = (input + 1) * input;
y = y / input;
y - 1
''', '''
1.0
2.0
3.0
4.0
5.0
''')
def testTutorial2_5(self):
self.check('''
1
2
3
4
5
''', r'''
input: double
output: double
action:
var x = input + 1,
y = input + 2,
z = input + 3;
var a = x + y - z,
b = x * y / z;
a / b
''', '''
0.6666666666666666
0.8333333333333334
0.8999999999999999
0.9333333333333333
0.9523809523809523
''')
def testTutorial2_6(self):
self.check('''
1
2
3
4
5
''', r'''
input: int
output: int
method: emit
action:
if (input % 2 == 0)
emit(input)
''', '''
2
4
''')
def testTutorial2_7(self):
self.check('''
1
2
3
4
5
''', r'''
input: int
output: string
action:
if (input % 2 == 0)
"even"
else
"odd"
''', '''
"odd"
"even"
"odd"
"even"
"odd"
''')
def testTutorial2_8(self):
self.check('''
0
1
2
3
4
5
''', r'''
input: int
output: string
action:
if (input % 3 == 0)
"off"
else if (input % 3 == 1)
"on"
else
"high impedance"
''', '''
"off"
"on"
"high impedance"
"off"
"on"
"high impedance"
''')
def testTutorial2_9(self):
self.check('''
null
''', r'''
input: null
output: int
method: emit
action:
var i = 0;
while (i < 10) {
i = i + 1;
emit(i)
}
''', '''
1
2
3
4
5
6
7
8
9
10
''')
def testTutorial2_10(self):
self.check('''
null
''', r'''
input: null
output: int
method: emit
action:
var i = 0;
do {
i = i + 1;
emit(i)
} until (i == 10)
''', '''
1
2
3
4
5
6
7
8
9
10
''')
def testTutorial2_11(self):
self.check('''
["hello", "my", "ragtime", "gal"]
''', r'''
input: array(string)
output: string
method: emit
action:
for (i = 0; i < 4; i = i + 1)
emit(input[i]);
foreach (x : input)
emit(x);
foreach (k, v : new(map(int), one: 1, two: 2, three: 3))
emit(k);
''', '''
"hello"
"my"
"ragtime"
"gal"
"hello"
"my"
"ragtime"
"gal"
"three"
"two"
"one"
''')
def testTutorial2_12(self):
self.check('''
1
2
3
4
5
''', r'''
input: int
output: int
action: u.squared(u.cubed(input))
fcns:
squared = fcn(x: int -> int) {
x * x
};
cubed = fcn(x: int -> int) {
x * u.squared(x)
}
''', '''
1
64
729
4096
15625
''')
def testTutorial2_13(self):
self.check('''
["hello", "my", "darling", "hello", "my", "honey", "hello", "my", "ragtime", "gal"]
''', r'''
input: array(string)
output: string
action: a.maxLT(input, u.customLessThan)
fcns:
customLessThan = fcn(x: string, y: string -> boolean)
s.len(x) < s.len(y)
''', '''
"darling"
''')
def testTutorial2_14(self):
self.check('''
true
false
''', r'''
input: boolean
output: array(int)
action:
var sortme = json(array(int), [23, 55, 18, 62, 4, 99]);
a.sortLT(sortme,
fcn(x: int, y: int -> boolean) if (input) x < y else x > y)
''', '''
[4,18,23,55,62,99]
[99,62,55,23,18,4]
''')
def testTutorial2_15(self):
self.check('''
5
25
''', r'''
input: int
output: array(double)
action: u.bernoulli(input)
fcns:
bernoulli = fcn(N: int -> array(double)) {
var BN = new(array(double), 1, -0.5);
for (M = 2; M <= N; M = M + 1) {
var S = -(1/(M + 1) - 0.5);
for (K = 2; K != M; K = K + 1) {
var R = 1.0;
for (J = 2; J <= K; J = J + 1)
R = R*(J + M - K)/J;
S = S - R*BN[K];
};
BN = a.append(BN, S);
};
for (M = 3; M <= N; M = M + 2)
BN = a.replace(BN, M, 0);
BN
}
''', '''
[1.0,-0.5,0.16666666666666669,0.0,-0.03333333333333338,0.0]
[1.0,-0.5,0.16666666666666669,0.0,-0.03333333333333338,0.0,0.023809523809523808,0.0,-0.03333333333333302,0.0,0.07575757575757641,0.0,-0.2531135531135573,0.0,1.166666666666674,0.0,-7.0921568627451705,0.0,54.97117794486118,0.0,-529.12424242423,0.0,6192.123188405604,0.0,-86580.25311355002,0.0]
''')
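# The u.bernoulli recurrence builds the Bernoulli numbers B_0..B_N and then
# zeroes the odd-index entries beyond B_1. The expected outputs match the
# classical values B_0 = 1, B_1 = -1/2, B_2 = 1/6, B_4 = -1/30, B_6 = 1/42.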
def testTutorial3_1(self):
self.check('''
"hello"
"my"
"darling"
"hello"
"my"
"honey"
"hello"
"my"
"ragtime"
"gal"
''', r'''
input: string
output: string
cells:
longest(string) = ""
action:
if (s.len(input) > s.len(longest)) {
longest = input;
input
}
else
longest
''', '''
"hello"
"hello"
"darling"
"darling"
"darling"
"darling"
"darling"
"darling"
"darling"
"darling"
''')
def testTutorial3_2(self):
self.check('''
"hello"
"my"
"darling"
"hello"
"my"
"honey"
"hello"
"my"
"ragtime"
"gal"
''', r'''
input: string
output: int
pools:
wordCount(int) = {}
action:
wordCount[input] to fcn(x: int -> int) x + 1 init 0;
wordCount["hello"]
''', '''
1
1
1
2
2
2
3
3
3
3
''')
def testTutorial3_3(self):
self.assertRaises(PFAInitializationException, lambda: titus.prettypfa.engine(r'''
input: string
output: int
cells:
one(int) = 1;
two(int) = 2;
action:
one to u.changeOne
fcns:
changeOne = fcn(x: int -> int) u.functionThatCallsChangeTwo();
functionThatCallsChangeTwo = fcn(-> int) {
two to u.changeTwo;
1
};
changeTwo = fcn(x: int -> int) 2
'''))
def testTutorial3_4(self):
self.check('''
{"one": 11, "two": 3.6, "three": "TEST"}
{"one": 11, "two": 3.4, "three": "TEST"}
{"one": 13, "two": 3.6, "three": "TEST"}
{"one": 13, "two": 3.6, "three": "NOT-TEST"}
''', r'''
input: record(one: int, two: double, three: string, Datum)
output: string
cells:
tree(record(field: enum([one, two, three], TreeFields),
operator: string,
value: union(double, string),
pass: union(string, TreeNode),
fail: union(string, TreeNode), TreeNode)) =
{field: one,
operator: "<",
value: {double: 12},
pass: {TreeNode: {
field: two,
operator: ">",
value: {double: 3.5},
pass: {string: "yes-yes"},
fail: {string: "yes-no"}}},
fail: {TreeNode: {
field: three,
operator: "==",
value: {string: TEST},
pass: {string: "no-yes"},
fail: {string: "no-no"}}}}
action:
model.tree.simpleWalk(input, tree, fcn(d: Datum, t: TreeNode -> boolean) model.tree.simpleTest(d, t))
''', '''
"yes-yes"
"yes-no"
"no-yes"
"no-no"
''')
def testTutorial3_5(self):
self.check('''
1
2
3
4
5
''', r'''
input: int
output: int
cells:
counter(int, rollback: true) = 0
action:
counter to fcn(x: int -> int) x + 1;
if (input < 4)
error("This one is too small.");
counter
''', '''
1
2
''', PFAUserException)
def testTutorial3_6(self):
self.check('''
1
2
3
4
5
''', r'''
input: int
output: int
action:
if (input > 3)
u.callfunc(input);
input
fcns:
callfunc = fcn(x: int -> null) log("enter callfunc", x)
''', '''
1
2
3
4
5
''', PFAUserException)
def testModels_1(self):
self.check('''
[1.2, 1.2, 1.2, 1.2, 1.2]
[1.8, 1.8, 1.8, 1.8, 1.8]
[2.2, 2.2, 2.2, 2.2, 2.2]
[5.0, 5.0, 5.0, 5.0, 5.0]
[-1000.0, -1000.0, -1000.0, -1000.0, -1000.0]
''', r'''
input: array(double)
output: string
cells:
clusters(array(record(center: array(double), id: string, Cluster))) =
[{id: one, center: [1, 1, 1, 1, 1]},
{id: two, center: [2, 2, 2, 2, 2]},
{id: three, center: [3, 3, 3, 3, 3]},
{id: four, center: [4, 4, 4, 4, 4]},
{id: five, center: [5, 5, 5, 5, 5]}]
action:
model.cluster.closest(input,
clusters,
fcn(x: array(double), y: array(double) -> double)
metric.euclidean(metric.absDiff, x, y))["id"]
''', '''
"one"
"two"
"two"
"five"
"one"
''', PFAUserException)
def testModels_2(self):
self.check('''
{"one": 1, "two": 7, "three": "whatever"}
{"one": 1, "two": 0, "three": "whatever"}
{"one": 15, "two": 7, "three": "TEST"}
{"one": 15, "two": 7, "three": "ZEST"}
''', r'''
input: record(one: int, two: double, three: string, Datum)
output: string
cells:
tree(record(field: enum([one, two, three], Fields),
operator: string,
value: union(int, double, string),
pass: union(string, TreeNode),
fail: union(string, TreeNode), TreeNode)) =
{field: one,
operator: "<",
value: {double: 12},
pass: {TreeNode: {
field: two,
operator: ">",
value: {double: 3.5},
pass: {string: "yes-yes"},
fail: {string: "yes-no"}}},
fail: {TreeNode: {
field: three,
operator: "==",
value: {string: TEST},
pass: {string: "no-yes"},
fail: {string: "no-no"}}}}
action:
model.tree.simpleWalk(input, tree,
fcn(d: Datum, t: TreeNode -> boolean) model.tree.simpleTest(d, t))
''', '''
"yes-yes"
"yes-no"
"no-yes"
"no-no"
''', PFAUserException)
def testModels_3(self):
self.check('''
3.35
-1.37
-3.92
6.74
12.06
3.81
3.35
-1.18
-1.39
5.55
5.3
12.8
10.36
12.05
3.8
12.81
11.1
8.37
7.32
15.22
''', r'''
input: double
output: boolean
cells:
last(double) = 0.0
method: emit
action:
last to fcn(oldValue: double -> double) {
var newValue = stat.change.updateCUSUM(
// alternate minus baseline
prob.dist.gaussianLL(input, 10.0, 3.0) - prob.dist.gaussianLL(input, 2.0, 5.0),
oldValue,
0.0); // resetValue = 0.0
emit(newValue > 5.0);
newValue
}
''', '''
false
false
false
false
false
false
false
false
false
false
false
false
false
true
true
true
true
true
true
true
''', PFAUserException)
def testModels_4(self):
self.check('''
{"key": "one", "value": 100.1}
{"key": "one", "value": 101.3}
{"key": "one", "value": 100.9}
{"key": "one", "value": 101.1}
{"key": "one", "value": 101.0}
{"key": "two", "value": 202.1}
{"key": "two", "value": 202.3}
{"key": "two", "value": 202.9}
{"key": "two", "value": 202.1}
{"key": "two", "value": 202.0}
{"key": "one", "value": 100.1}
{"key": "one", "value": 101.3}
{"key": "one", "value": 100.9}
{"key": "one", "value": 101.1}
{"key": "one", "value": 101.0}
{"key": "two", "value": 202.1}
{"key": "two", "value": 202.3}
{"key": "two", "value": 202.9}
{"key": "two", "value": 202.1}
{"key": "two", "value": 202.0}
''', r'''
input: record(key: string, value: double, Input)
output: record(key: string, zValue: double, Output)
pools:
counters(record(count: double, mean: double, variance: double, Counter)) = {}
method: emit
action:
counters[input.key] to fcn(oldCounter: Counter -> Counter) {
var newCounter = stat.sample.update(input.value, 1.0, oldCounter);
if (newCounter.count > 3)
emit(new(Output, key: input.key, zValue: stat.change.zValue(input.value, newCounter, false)));
newCounter
} init json(Counter, {count: 0.0, mean: 0.0, variance: 0.0})
''', '''
{"key":"one","zValue":0.54882129994845}
{"key":"one","zValue":0.29138575870718914}
{"key":"two","zValue":-0.7624928516630021}
{"key":"two","zValue":-0.8616404368553157}
{"key":"one","zValue":-1.3677897164722017}
{"key":"one","zValue":0.9816907935594097}
{"key":"one","zValue":0.13894250359421337}
{"key":"one","zValue":0.5400617248673225}
{"key":"one","zValue":0.2913857587071894}
{"key":"two","zValue":-0.49319696191608037}
{"key":"two","zValue":0.15191090506255248}
{"key":"two","zValue":1.659850005517447}
{"key":"two","zValue":-0.6434211884046508}
{"key":"two","zValue":-0.8616404368553247}
''', PFAUserException)
if __name__ == "__main__":
unittest.main()
```
#### File: titus/test/testDumpstate.py
```python
import json
import unittest
from titus.genpy import PFAEngine
class TestDumpstate(unittest.TestCase):
def testPrivateCellsInt(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type: int
init: 0
action:
- cell: test
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
- null
''')
self.assertEqual(engine.snapshot().cells["test"].init, "0")
engine.action(3)
self.assertEqual(engine.snapshot().cells["test"].init, "3")
engine.action(2)
self.assertEqual(engine.snapshot().cells["test"].init, "5")
def testPrivateCellsString(self):
engine, = PFAEngine.fromYaml('''
input: string
output: "null"
cells:
test:
type: string
init: ""
action:
- cell: test
to:
params: [{x: string}]
ret: string
do: {s.concat: [x, input]}
- null
''')
self.assertEqual(engine.snapshot().cells["test"].init, '""')
engine.action("hey")
self.assertEqual(engine.snapshot().cells["test"].init, '"hey"')
engine.action("there")
self.assertEqual(engine.snapshot().cells["test"].init, '"heythere"')
def testPrivateCellsArray(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type: {type: array, items: int}
init: []
action:
- cell: test
to:
params: [{x: {type: array, items: int}}]
ret: {type: array, items: int}
do: {a.append: [x, input]}
- null
''')
self.assertEqual(engine.snapshot().cells["test"].init, "[]")
engine.action(3)
self.assertEqual(engine.snapshot().cells["test"].init, "[3]")
engine.action(2)
self.assertEqual(engine.snapshot().cells["test"].init, "[3, 2]")
def testPrivateCellsMap(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type: {type: map, values: int}
init: {"a": 0, "b": 0}
action:
- cell: test
path: [{string: b}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
- null
''')
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["b"], 0)
engine.action(3)
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["b"], 3)
engine.action(2)
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["b"], 5)
def testPrivateCellsRecord(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type:
type: record
name: MyRecord
fields:
- {name: a, type: int}
- {name: b, type: string}
init: {a: 0, b: hey}
action:
- cell: test
path: [{string: a}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
- null
''')
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["a"], 0)
engine.action(3)
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["a"], 3)
engine.action(2)
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["a"], 5)
def testPrivateCellsUnion(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type: [int, string]
init: {int: 0}
action:
- cell: test
to:
params: [{x: [int, string]}]
ret: [int, string]
do:
cast: x
cases:
- as: int
named: y
do: {+: [y, input]}
- as: string
named: y
do: y
- null
''')
self.assertEqual(engine.snapshot().cells["test"].init, '{"int": 0}')
engine.action(3)
self.assertEqual(engine.snapshot().cells["test"].init, '3')
engine.action(2)
self.assertEqual(engine.snapshot().cells["test"].init, '5')
def testPublicCellsInt(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type: int
init: 0
shared: true
action:
- cell: test
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
- null
''')
self.assertEqual(engine.snapshot().cells["test"].init, "0")
engine.action(3)
self.assertEqual(engine.snapshot().cells["test"].init, "3")
engine.action(2)
self.assertEqual(engine.snapshot().cells["test"].init, "5")
def testPublicCellsString(self):
engine, = PFAEngine.fromYaml('''
input: string
output: "null"
cells:
test:
type: string
init: ""
shared: true
action:
- cell: test
to:
params: [{x: string}]
ret: string
do: {s.concat: [x, input]}
- null
''')
self.assertEqual(engine.snapshot().cells["test"].init, '""')
engine.action("hey")
self.assertEqual(engine.snapshot().cells["test"].init, '"hey"')
engine.action("there")
self.assertEqual(engine.snapshot().cells["test"].init, '"heythere"')
def testPublicCellsArray(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type: {type: array, items: int}
init: []
shared: true
action:
- cell: test
to:
params: [{x: {type: array, items: int}}]
ret: {type: array, items: int}
do: {a.append: [x, input]}
- null
''')
self.assertEqual(engine.snapshot().cells["test"].init, "[]")
engine.action(3)
self.assertEqual(engine.snapshot().cells["test"].init, "[3]")
engine.action(2)
self.assertEqual(engine.snapshot().cells["test"].init, "[3, 2]")
def testPublicCellsMap(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type: {type: map, values: int}
init: {"a": 0, "b": 0}
shared: true
action:
- cell: test
path: [{string: b}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
- null
''')
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["b"], 0)
engine.action(3)
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["b"], 3)
engine.action(2)
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["b"], 5)
def testPublicCellsRecord(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type:
type: record
name: MyRecord
fields:
- {name: a, type: int}
- {name: b, type: string}
init: {a: 0, b: hey}
shared: true
action:
- cell: test
path: [{string: a}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
- null
''')
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["a"], 0)
engine.action(3)
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["a"], 3)
engine.action(2)
self.assertEqual(json.loads(engine.snapshot().cells["test"].init)["a"], 5)
def testPublicCellsUnion(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
test:
type: [int, string]
init: {int: 0}
shared: true
action:
- cell: test
to:
params: [{x: [int, string]}]
ret: [int, string]
do:
cast: x
cases:
- as: int
named: y
do: {+: [y, input]}
- as: string
named: y
do: y
- null
''')
self.assertEqual(engine.snapshot().cells["test"].init, '{"int": 0}')
engine.action(3)
self.assertEqual(engine.snapshot().cells["test"].init, '3')
engine.action(2)
self.assertEqual(engine.snapshot().cells["test"].init, '5')
def testPrivatePoolsInt(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type: int
action:
- pool: test
path: [{string: zzz}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
init: 0
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action(3)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "3")
engine.action(2)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "5")
def testPrivatePoolsString(self):
engine, = PFAEngine.fromYaml('''
input: string
output: "null"
pools:
test:
type: string
action:
- pool: test
path: [{string: zzz}]
to:
params: [{x: string}]
ret: string
do: {s.concat: [x, input]}
init: {string: ""}
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action("hey")
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], '"hey"')
engine.action("there")
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], '"heythere"')
def testPrivatePoolsArray(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type: {type: array, items: int}
action:
- pool: test
path: [{string: zzz}]
to:
params: [{x: {type: array, items: int}}]
ret: {type: array, items: int}
do: {a.append: [x, input]}
init: {value: [], type: {type: array, items: int}}
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action(3)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "[3]")
engine.action(2)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "[3, 2]")
def testPrivatePoolsMap(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type: {type: map, values: int}
init: {"zzz": {"a": 0, "b": 0}}
action:
- pool: test
path: [{string: zzz}, {string: b}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
init: {value: {}, type: {type: map, values: int}}
- null
''')
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["b"], 0)
engine.action(3)
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["b"], 3)
engine.action(2)
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["b"], 5)
def testPrivatePoolsRecord(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type:
type: record
name: MyRecord
fields:
- {name: a, type: int}
- {name: b, type: string}
action:
- pool: test
path: [{string: zzz}, {string: a}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
init: {value: {a: 0, b: hey}, type: MyRecord}
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action(3)
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["a"], 3)
engine.action(2)
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["a"], 5)
def testPrivatePoolsUnion(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type: [int, string]
action:
- pool: test
path: [{string: zzz}]
to:
params: [{x: [int, string]}]
ret: [int, string]
do:
cast: x
cases:
- as: int
named: y
do: {+: [y, input]}
- as: string
named: y
do: y
init: {int: 0}
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action(3)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "3")
engine.action(2)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "5")
def testPublicPoolsInt(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type: int
shared: true
action:
- pool: test
path: [{string: zzz}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
init: 0
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action(3)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "3")
engine.action(2)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "5")
def testPublicPoolsString(self):
engine, = PFAEngine.fromYaml('''
input: string
output: "null"
pools:
test:
type: string
shared: true
action:
- pool: test
path: [{string: zzz}]
to:
params: [{x: string}]
ret: string
do: {s.concat: [x, input]}
init: {string: ""}
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action("hey")
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], '"hey"')
engine.action("there")
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], '"heythere"')
def testPublicPoolsArray(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type: {type: array, items: int}
shared: true
action:
- pool: test
path: [{string: zzz}]
to:
params: [{x: {type: array, items: int}}]
ret: {type: array, items: int}
do: {a.append: [x, input]}
init: {value: [], type: {type: array, items: int}}
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action(3)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "[3]")
engine.action(2)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "[3, 2]")
def testPublicPoolsMap(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type: {type: map, values: int}
init: {"zzz": {"a": 0, "b": 0}}
shared: true
action:
- pool: test
path: [{string: zzz}, {string: b}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
init: {value: {}, type: {type: map, values: int}}
- null
''')
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["b"], 0)
engine.action(3)
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["b"], 3)
engine.action(2)
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["b"], 5)
def testPublicPoolsRecord(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type:
type: record
name: MyRecord
fields:
- {name: a, type: int}
- {name: b, type: string}
shared: true
action:
- pool: test
path: [{string: zzz}, {string: a}]
to:
params: [{x: int}]
ret: int
do: {+: [x, input]}
init: {value: {a: 0, b: hey}, type: MyRecord}
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action(3)
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["a"], 3)
engine.action(2)
self.assertEqual(json.loads(engine.snapshot().pools["test"].init["zzz"])["a"], 5)
def testPublicPoolsUnion(self):
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
pools:
test:
type: [int, string]
shared: true
action:
- pool: test
path: [{string: zzz}]
to:
params: [{x: [int, string]}]
ret: [int, string]
do:
cast: x
cases:
- as: int
named: y
do: {+: [y, input]}
- as: string
named: y
do: y
init: {int: 0}
- null
''')
self.assertEqual(engine.snapshot().pools["test"].init, {})
engine.action(3)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "3")
engine.action(2)
self.assertEqual(engine.snapshot().pools["test"].init["zzz"], "5")
if __name__ == "__main__":
unittest.main()
```
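Taken together, these tests pin down the snapshot contract: `engine.snapshot()` returns the engine's configuration with each cell's current state (and each pool entry's state) serialized back into the `init` field as a JSON string. A minimal sketch of that round trip, assuming `titus` is installed (illustrative; not part of the repo):

```python
import json
from titus.genpy import PFAEngine

# Same shape as testPrivateCellsInt above: a private int cell updated by a
# cell-to expression on every action.
engine, = PFAEngine.fromYaml('''
input: int
output: "null"
cells:
  test: {type: int, init: 0}
action:
  - cell: test
    to:
      params: [{x: int}]
      ret: int
      do: {+: [x, input]}
  - null
''')

for value in (3, 2):
    engine.action(value)

# snapshot() serializes the live state back into init as a JSON string.
assert json.loads(engine.snapshot().cells["test"].init) == 5
```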
#### File: titus/lib/cast.py
```python
import math
import io
import json
import avro.schema
from avro.io import BinaryEncoder, DatumWriter
from titus.util import untagUnion
from titus.datatype import schemaToAvroType
from titus.datatype import jsonNodeToAvroType
from titus.fcn import Fcn
from titus.fcn import LibFcn
from titus.signature import Sig
from titus.signature import Sigs
from titus.datatype import *
from titus.errors import *
from titus.lib.core import INT_MIN_VALUE
from titus.lib.core import INT_MAX_VALUE
from titus.lib.core import LONG_MIN_VALUE
from titus.lib.core import LONG_MAX_VALUE
from titus.lib.core import FLOAT_MIN_VALUE
from titus.lib.core import FLOAT_MAX_VALUE
from titus.lib.core import DOUBLE_MIN_VALUE
from titus.lib.core import DOUBLE_MAX_VALUE
from titus.datatype import jsonEncoder
import titus.P as P
provides = {}
def provide(fcn):
provides[fcn.name] = fcn
prefix = "cast."
#################################################################### wrap-around arithmetic
def bitsToMax(x): return 2**x
def doUnsigned(x, bits):
maximum = bitsToMax(bits)
if x < 0:
y = x + maximum * int(math.ceil(-float(x) / maximum))
else:
y = x
return y % maximum
class ToSigned(LibFcn):
name = prefix + "signed"
sig = Sig([{"x": P.Long()}, {"bits": P.Int()}], P.Long())
errcodeBase = 17000
def __call__(self, state, scope, pos, paramTypes, x, bits):
if bits < 2 or bits > 64:
raise PFARuntimeException("unrepresentable unsigned number", self.errcodeBase + 0, self.name, pos)
y = doUnsigned(x, bits)
maximum = bitsToMax(bits - 1)
if y > maximum - 1:
return y - 2*maximum
else:
return y
provide(ToSigned())
class ToUnsigned(LibFcn):
name = prefix + "unsigned"
sig = Sig([{"x": P.Long()}, {"bits": P.Int()}], P.Long())
errcodeBase = 17010
def __call__(self, state, scope, pos, paramTypes, x, bits):
if bits < 1 or bits > 63:
raise PFARuntimeException("unrepresentable unsigned number", self.errcodeBase + 0, self.name, pos)
return doUnsigned(x, bits)
provide(ToUnsigned())
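# Worked example of the wrap-around rules above: doUnsigned(-1, 8) shifts -1
# into [0, 256), giving 255, so cast.unsigned(-1, 8) == 255.  ToSigned then
# re-centers values >= 2**(bits - 1), so cast.signed(255, 8) == 255 - 256 == -1.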
#################################################################### number precisions
class ToInt(LibFcn):
name = prefix + "int"
sig = Sigs([Sig([{"x": P.Int()}], P.Int()),
Sig([{"x": P.Long()}], P.Int()),
Sig([{"x": P.Float()}], P.Int()),
Sig([{"x": P.Double()}], P.Int())])
errcodeBase = 17020
def __call__(self, state, scope, pos, paramTypes, x):
try:
if isinstance(x, float):
if math.isnan(x):
raise OverflowError
else:
out = int(math.floor(x + 0.5))
else:
out = x
if INT_MIN_VALUE <= out <= INT_MAX_VALUE:
return out
else:
raise OverflowError
except OverflowError:
raise PFARuntimeException("int overflow", self.errcodeBase + 0, self.name, pos)
provide(ToInt())
class ToLong(LibFcn):
name = prefix + "long"
sig = Sigs([Sig([{"x": P.Int()}], P.Long()),
Sig([{"x": P.Long()}], P.Long()),
Sig([{"x": P.Float()}], P.Long()),
Sig([{"x": P.Double()}], P.Long())])
errcodeBase = 17030
def __call__(self, state, scope, pos, paramTypes, x):
try:
if isinstance(x, float):
if math.isnan(x):
raise OverflowError
else:
out = int(math.floor(x + 0.5))
else:
out = x
if LONG_MIN_VALUE <= out <= LONG_MAX_VALUE:
return out
else:
raise OverflowError
except OverflowError:
raise PFARuntimeException("long overflow", self.errcodeBase + 0, self.name, pos)
provide(ToLong())
class ToFloat(LibFcn):
name = prefix + "float"
sig = Sigs([Sig([{"x": P.Int()}], P.Float()),
Sig([{"x": P.Long()}], P.Float()),
Sig([{"x": P.Float()}], P.Float()),
Sig([{"x": P.Double()}], P.Float())])
errcodeBase = 17040
def genpy(self, paramTypes, args, pos):
return "float({0})".format(*args)
def __call__(self, state, scope, pos, paramTypes, x):
return float(x)
provide(ToFloat())
class ToDouble(LibFcn):
name = prefix + "double"
sig = Sigs([Sig([{"x": P.Int()}], P.Double()),
Sig([{"x": P.Long()}], P.Double()),
Sig([{"x": P.Float()}], P.Double()),
Sig([{"x": P.Double()}], P.Double())])
errcodeBase = 17050
def genpy(self, paramTypes, args, pos):
return "float({0})".format(*args)
def __call__(self, state, scope, pos, paramTypes, x):
return float(x)
provide(ToDouble())
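# Note: Python has a single float type (a C double), so cast.float and
# cast.double both compile to the same float() call; their signatures differ
# only in the declared Avro types.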
#################################################################### fanouts
def fanoutEnum(x, symbols):
return [x == s for s in symbols]
def fanoutString(x, dictionary, outOfRange):
out = [x == s for s in dictionary]
if outOfRange:
return out + [x not in dictionary]
else:
return out
def fanoutInt(x, minimum, maximum, outOfRange):
out = [x == i for i in xrange(minimum, maximum)]
if outOfRange:
return out + [x < minimum or x >= maximum]
else:
return out
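# Worked example of the fanout helpers: fanoutString("b", ["a", "b", "c"], True)
# yields [False, True, False, False] -- a one-hot indicator per dictionary
# entry plus a trailing out-of-range flag.  The Fanout* classes below map the
# booleans to 0/1 (int, long) or 0.0/1.0 (float, double).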
class FanoutBoolean(LibFcn):
name = prefix + "fanoutBoolean"
sig = Sigs([Sig([{"x": P.WildEnum("A")}], P.Array(P.Boolean())),
Sig([{"x": P.String()}, {"dictionary": P.Array(P.String())}, {"outOfRange": P.Boolean()}], P.Array(P.Boolean())),
Sig([{"x": P.Int()}, {"minimum": P.Int()}, {"maximum": P.Int()}, {"outOfRange": P.Boolean()}], P.Array(P.Boolean()))])
errcodeBase = 17060
def __call__(self, state, scope, pos, paramTypes, x, *args):
if len(args) == 0:
return fanoutEnum(x, paramTypes[0]["symbols"])
elif len(args) == 2:
if len(args[0]) != len(set(args[0])):
raise PFARuntimeException("non-distinct values in dictionary", self.errcodeBase + 0, self.name, pos)
return fanoutString(x, args[0], args[1])
elif len(args) == 3:
return fanoutInt(x, args[0], args[1], args[2])
provide(FanoutBoolean())
class FanoutInt(LibFcn):
name = prefix + "fanoutInt"
sig = Sigs([Sig([{"x": P.WildEnum("A")}], P.Array(P.Int())),
Sig([{"x": P.String()}, {"dictionary": P.Array(P.String())}, {"outOfRange": P.Boolean()}], P.Array(P.Int())),
Sig([{"x": P.Int()}, {"minimum": P.Int()}, {"maximum": P.Int()}, {"outOfRange": P.Boolean()}], P.Array(P.Int()))])
errcodeBase = 17070
def __call__(self, state, scope, pos, paramTypes, x, *args):
if len(args) == 0:
return [1 if y else 0 for y in fanoutEnum(x, paramTypes[0]["symbols"])]
elif len(args) == 2:
if len(args[0]) != len(set(args[0])):
raise PFARuntimeException("non-distinct values in dictionary", self.errcodeBase + 0, self.name, pos)
return [1 if y else 0 for y in fanoutString(x, args[0], args[1])]
elif len(args) == 3:
return [1 if y else 0 for y in fanoutInt(x, args[0], args[1], args[2])]
provide(FanoutInt())
class FanoutLong(LibFcn):
name = prefix + "fanoutLong"
sig = Sigs([Sig([{"x": P.WildEnum("A")}], P.Array(P.Long())),
Sig([{"x": P.String()}, {"dictionary": P.Array(P.String())}, {"outOfRange": P.Boolean()}], P.Array(P.Long())),
Sig([{"x": P.Int()}, {"minimum": P.Int()}, {"maximum": P.Int()}, {"outOfRange": P.Boolean()}], P.Array(P.Long()))])
errcodeBase = 17080
def __call__(self, state, scope, pos, paramTypes, x, *args):
if len(args) == 0:
return [1 if y else 0 for y in fanoutEnum(x, paramTypes[0]["symbols"])]
elif len(args) == 2:
if len(args[0]) != len(set(args[0])):
raise PFARuntimeException("non-distinct values in dictionary", self.errcodeBase + 0, self.name, pos)
return [1 if y else 0 for y in fanoutString(x, args[0], args[1])]
elif len(args) == 3:
return [1 if y else 0 for y in fanoutInt(x, args[0], args[1], args[2])]
provide(FanoutLong())
class FanoutFloat(LibFcn):
name = prefix + "fanoutFloat"
sig = Sigs([Sig([{"x": P.WildEnum("A")}], P.Array(P.Float())),
Sig([{"x": P.String()}, {"dictionary": P.Array(P.String())}, {"outOfRange": P.Boolean()}], P.Array(P.Float())),
Sig([{"x": P.Int()}, {"minimum": P.Int()}, {"maximum": P.Int()}, {"outOfRange": P.Boolean()}], P.Array(P.Float()))])
errcodeBase = 17090
def __call__(self, state, scope, pos, paramTypes, x, *args):
if len(args) == 0:
return [1.0 if y else 0.0 for y in fanoutEnum(x, paramTypes[0]["symbols"])]
elif len(args) == 2:
if len(args[0]) != len(set(args[0])):
raise PFARuntimeException("non-distinct values in dictionary", self.errcodeBase + 0, self.name, pos)
return [1.0 if y else 0.0 for y in fanoutString(x, args[0], args[1])]
elif len(args) == 3:
return [1.0 if y else 0.0 for y in fanoutInt(x, args[0], args[1], args[2])]
provide(FanoutFloat())
class FanoutDouble(LibFcn):
name = prefix + "fanoutDouble"
sig = Sigs([Sig([{"x": P.WildEnum("A")}], P.Array(P.Double())),
Sig([{"x": P.String()}, {"dictionary": P.Array(P.String())}, {"outOfRange": P.Boolean()}], P.Array(P.Double())),
Sig([{"x": P.Int()}, {"minimum": P.Int()}, {"maximum": P.Int()}, {"outOfRange": P.Boolean()}], P.Array(P.Double()))])
errcodeBase = 17100
def __call__(self, state, scope, pos, paramTypes, x, *args):
if len(args) == 0:
return [1.0 if y else 0.0 for y in fanoutEnum(x, paramTypes[0]["symbols"])]
elif len(args) == 2:
if len(args[0]) != len(set(args[0])):
raise PFARuntimeException("non-distinct values in dictionary", self.errcodeBase + 0, self.name, pos)
return [1.0 if y else 0.0 for y in fanoutString(x, args[0], args[1])]
elif len(args) == 3:
return [1.0 if y else 0.0 for y in fanoutInt(x, args[0], args[1], args[2])]
provide(FanoutDouble())
#################################################################### serialize
class CastAvro(LibFcn):
name = prefix + "avro"
sig = Sig([{"x": P.Wildcard("A")}], P.Bytes())
errcodeBase = 17110
def __call__(self, state, scope, pos, paramTypes, x):
schema = avro.schema.parse(json.dumps(paramTypes[0]))
x = untagUnion(x, paramTypes[0])
bytes = io.BytesIO()
writer = DatumWriter(schema)
writer.write(x, BinaryEncoder(bytes))
bytes.flush()
return bytes.getvalue()
provide(CastAvro())
class CastJson(LibFcn):
name = prefix + "json"
sig = Sig([{"x": P.Wildcard("A")}], P.String())
errcodeBase = 17120
def __call__(self, state, scope, pos, paramTypes, x):
return json.dumps(jsonEncoder(jsonNodeToAvroType(paramTypes[0]), x), separators=(",", ":"))
provide(CastJson())
```
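Because `state`, `scope`, and `paramTypes` are unused by the two wrap-around functions above, they can be exercised standalone with placeholder arguments. A quick sketch (illustrative usage, not from the repo):

```python
from titus.lib.cast import ToSigned, ToUnsigned

signed = ToSigned()
unsigned = ToUnsigned()

# state, scope, pos, and paramTypes are placeholders here; pos is only used
# when an exception is raised.
assert unsigned(None, None, None, [], -1, 8) == 255  # -1 wraps into [0, 256)
assert signed(None, None, None, [], 255, 8) == -1    # 255 re-centers to -1
```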
#### File: titus/lib/la.py
```python
import math
from titus.fcn import Fcn
from titus.fcn import LibFcn
from titus.signature import Sig
from titus.signature import Sigs
from titus.datatype import *
from titus.errors import *
from titus.util import callfcn, div
import titus.P as P
provides = {}
def provide(fcn):
provides[fcn.name] = fcn
prefix = "la."
def np():
import numpy
return numpy
def rowKeys(x):
return set(x.keys())
def colKeys(x):
if len(x) == 0:
return set()
else:
return reduce(lambda a, b: a.union(b), [set(xi.keys()) for xi in x.values()])
def arraysToMatrix(x):
return np().matrix(x, dtype=np().double)
def arrayToRowVector(x):
return np().matrix(x, dtype=np().double).T
def rowVectorToArray(x):
return x.T.tolist()[0]
def matrixToArrays(x):
return x.tolist()
def mapsToMatrix(x, rows, cols):
return np().matrix([[x.get(i, {}).get(j, 0.0) for j in cols] for i in rows], dtype=np().double)
def mapToRowVector(x, keys):
return np().matrix([x.get(k, 0.0) for k in keys], dtype=np().double).T
def rowVectorToMap(x, keys):
return dict(zip(keys, x.T.tolist()[0]))
def matrixToMaps(x, rows, cols):
return dict((row, dict(zip(cols, xi))) for row, xi in zip(rows, x.tolist()))
def raggedArray(x):
collens = map(len, x)
return max(collens) != min(collens)
def raggedMap(x):
return len(set(len(xi) for xi in x.values())) != 1
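# Note: raggedMap compares only the number of values per row, not the key
# sets, so {"r1": {"a": 1.0}, "r2": {"b": 2.0}} is not considered ragged.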
class MapApply(LibFcn):
name = prefix + "map"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}, {"fcn": P.Fcn([P.Double()], P.Double())}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Map(P.Double()))}, {"fcn": P.Fcn([P.Double()], P.Double())}], P.Map(P.Map(P.Double())))])
errcodeBase = 24000
def __call__(self, state, scope, pos, paramTypes, x, fcn):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x):
return [[callfcn(state, scope, fcn, [xj]) for xj in xi] for xi in x]
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()):
return dict((i, dict((j, callfcn(state, scope, fcn, [xj])) for j, xj in xi.items())) for i, xi in x.items())
provide(MapApply())
class Scale(LibFcn):
name = prefix + "scale"
sig = Sigs([Sig([{"x": P.Array(P.Double())}, {"alpha": P.Double()}], P.Array(P.Double())),
Sig([{"x": P.Array(P.Array(P.Double()))}, {"alpha": P.Double()}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Double())}, {"alpha": P.Double()}], P.Map(P.Double())),
Sig([{"x": P.Map(P.Map(P.Double()))}, {"alpha": P.Double()}], P.Map(P.Map(P.Double())))])
errcodeBase = 24010
def __call__(self, state, scope, pos, paramTypes, x, alpha):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x):
return [[xj * alpha for xj in xi] for xi in x]
elif isinstance(x, (list, tuple)):
return [xi * alpha for xi in x]
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x):
return dict((i, dict((j, xj * alpha) for j, xj in xi.items())) for i, xi in x.items())
else:
return dict((i, xi * alpha) for i, xi in x.items())
provide(Scale())
class ZipMap(LibFcn):
name = prefix + "zipmap"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}, {"y": P.Array(P.Array(P.Double()))}, {"fcn": P.Fcn([P.Double(), P.Double()], P.Double())}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Map(P.Double()))}, {"y": P.Map(P.Map(P.Double()))}, {"fcn": P.Fcn([P.Double(), P.Double()], P.Double())}], P.Map(P.Map(P.Double())))])
errcodeBase = 24020
def __call__(self, state, scope, pos, paramTypes, x, y, fcn):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x) and \
isinstance(y, (list, tuple)) and all(isinstance(yi, (list, tuple)) for yi in y):
if len(x) != len(y) or any(len(xi) != len(yi) for xi, yi in zip(x, y)):
raise PFARuntimeException("misaligned matrices", self.errcodeBase + 0, self.name, pos)
return [[callfcn(state, scope, fcn, [xj, yj]) for xj, yj in zip(xi, yi)] for xi, yi in zip(x, y)]
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()) and \
isinstance(y, dict) and all(isinstance(y[i], dict) for i in y.keys()):
rows = rowKeys(x).union(rowKeys(y))
cols = colKeys(x).union(colKeys(y))
return dict((i, dict((j, callfcn(state, scope, fcn, [x.get(i, {}).get(j, 0.0), y.get(i, {}).get(j, 0.0)])) for j in cols)) for i in rows)
provide(ZipMap())
class Add(LibFcn):
name = prefix + "add"
sig = Sigs([Sig([{"x": P.Array(P.Double())}, {"y": P.Array(P.Double())}], P.Array(P.Double())),
Sig([{"x": P.Array(P.Array(P.Double()))}, {"y": P.Array(P.Array(P.Double()))}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Double())}, {"y": P.Map(P.Double())}], P.Map(P.Double())),
Sig([{"x": P.Map(P.Map(P.Double()))}, {"y": P.Map(P.Map(P.Double()))}], P.Map(P.Map(P.Double())))])
errcodeBase = 24030
def __call__(self, state, scope, pos, paramTypes, x, y):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x) and \
isinstance(y, (list, tuple)) and all(isinstance(yi, (list, tuple)) for yi in y):
if len(x) != len(y) or any(len(xi) != len(yi) for xi, yi in zip(x, y)):
raise PFARuntimeException("misaligned matrices", self.errcodeBase + 0, self.name, pos)
return [[xj + yj for xj, yj in zip(xi, yi)] for xi, yi in zip(x, y)]
elif isinstance(x, (list, tuple)) and isinstance(y, (list, tuple)):
if len(x) != len(y):
raise PFARuntimeException("misaligned matrices", self.errcodeBase + 0, self.name, pos)
return [xi + yi for xi, yi in zip(x, y)]
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()) and \
isinstance(y, dict) and all(isinstance(y[i], dict) for i in y.keys()):
rows = rowKeys(x).union(rowKeys(y))
cols = colKeys(x).union(colKeys(y))
return dict((i, dict((j, x.get(i, {}).get(j, 0.0) + y.get(i, {}).get(j, 0.0)) for j in cols)) for i in rows)
else:
rows = rowKeys(x).union(rowKeys(y))
return dict((i, x.get(i, 0.0) + y.get(i, 0.0)) for i in rows)
provide(Add())
class Sub(LibFcn):
name = prefix + "sub"
sig = Sigs([Sig([{"x": P.Array(P.Double())}, {"y": P.Array(P.Double())}], P.Array(P.Double())),
Sig([{"x": P.Array(P.Array(P.Double()))}, {"y": P.Array(P.Array(P.Double()))}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Double())}, {"y": P.Map(P.Double())}], P.Map(P.Double())),
Sig([{"x": P.Map(P.Map(P.Double()))}, {"y": P.Map(P.Map(P.Double()))}], P.Map(P.Map(P.Double())))])
errcodeBase = 24040
def __call__(self, state, scope, pos, paramTypes, x, y):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x) and \
isinstance(y, (list, tuple)) and all(isinstance(yi, (list, tuple)) for yi in y):
if len(x) != len(y) or any(len(xi) != len(yi) for xi, yi in zip(x, y)):
raise PFARuntimeException("misaligned matrices", self.errcodeBase + 0, self.name, pos)
return [[xj - yj for xj, yj in zip(xi, yi)] for xi, yi in zip(x, y)]
elif isinstance(x, (list, tuple)) and isinstance(y, (list, tuple)):
if len(x) != len(y):
raise PFARuntimeException("misaligned matrices", self.errcodeBase + 0, self.name, pos)
return [xi - yi for xi, yi in zip(x, y)]
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()) and \
isinstance(y, dict) and all(isinstance(y[i], dict) for i in y.keys()):
rows = rowKeys(x).union(rowKeys(y))
cols = colKeys(x).union(colKeys(y))
return dict((i, dict((j, x.get(i, {}).get(j, 0.0) - y.get(i, {}).get(j, 0.0)) for j in cols)) for i in rows)
else:
rows = rowKeys(x).union(rowKeys(y))
return dict((i, x.get(i, 0.0) - y.get(i, 0.0)) for i in rows)
provide(Sub())
class Dot(LibFcn):
name = prefix + "dot"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}, {"y": P.Array(P.Double())}], P.Array(P.Double())),
Sig([{"x": P.Map(P.Map(P.Double()))}, {"y": P.Map(P.Double())}], P.Map(P.Double())),
Sig([{"x": P.Array(P.Array(P.Double()))}, {"y": P.Array(P.Array(P.Double()))}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Map(P.Double()))}, {"y": P.Map(P.Map(P.Double()))}], P.Map(P.Map(P.Double())))])
errcodeBase = 24050
def __call__(self, state, scope, pos, paramTypes, x, y):
if paramTypes[1]["type"] == "array":
if isinstance(paramTypes[1]["items"], dict) and paramTypes[1]["items"]["type"] == "array":
# array matrix-matrix case
bad = any(any(math.isnan(z) or math.isinf(z) for z in row) for row in x) or \
any(any(math.isnan(z) or math.isinf(z) for z in row) for row in y)
xmat = arraysToMatrix(x)
ymat = arraysToMatrix(y)
if xmat.shape[0] == 0 or xmat.shape[1] == 0 or ymat.shape[0] == 0 or ymat.shape[1] == 0:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 1, self.name, pos)
try:
if bad: raise PFARuntimeException("contains non-finite value", self.errcodeBase + 2, self.name, pos)
return matrixToArrays(np().dot(xmat, ymat))
except ValueError:
raise PFARuntimeException("misaligned matrices", self.errcodeBase + 0, self.name, pos)
else:
# array matrix-vector case
bad = any(any(math.isnan(z) or math.isinf(z) for z in row) for row in x) or \
any(math.isnan(z) or math.isinf(z) for z in y)
xmat = arraysToMatrix(x)
ymat = arrayToRowVector(y)
if xmat.shape[0] == 0 or xmat.shape[1] == 0 or ymat.shape[0] == 0 or ymat.shape[1] == 0:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 1, self.name, pos)
try:
if bad: raise PFARuntimeException("contains non-finite value", self.errcodeBase + 2, self.name, pos)
return rowVectorToArray(np().dot(xmat, ymat))
except ValueError:
raise PFARuntimeException("misaligned matrices", self.errcodeBase + 0, self.name, pos)
elif paramTypes[1]["type"] == "map":
if isinstance(paramTypes[1]["values"], dict) and paramTypes[1]["values"]["type"] == "map":
# map matrix-matrix case
bad = any(any(math.isnan(z) or math.isinf(z) for z in row.values()) for row in x.values()) or \
any(any(math.isnan(z) or math.isinf(z) for z in row.values()) for row in y.values())
rows = list(rowKeys(x))
inter = list(colKeys(x).union(rowKeys(y)))
cols = list(colKeys(y))
xmat = mapsToMatrix(x, rows, inter)
ymat = mapsToMatrix(y, inter, cols)
if xmat.shape[0] == 0 or xmat.shape[1] == 0 or ymat.shape[0] == 0 or ymat.shape[1] == 0:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 1, self.name, pos)
if bad: raise PFARuntimeException("contains non-finite value", self.errcodeBase + 2, self.name, pos)
return matrixToMaps(np().dot(xmat, ymat), rows, cols)
else:
# map matrix-vector case
bad = any(any(math.isnan(z) or math.isinf(z) for z in row.values()) for row in x.values()) or \
any(math.isnan(z) or math.isinf(z) for z in y.values())
rows = list(rowKeys(x))
cols = list(colKeys(x).union(rowKeys(y)))
xmat = mapsToMatrix(x, rows, cols)
ymat = mapToRowVector(y, cols)
if xmat.shape[0] == 0 or xmat.shape[1] == 0 or ymat.shape[0] == 0 or ymat.shape[1] == 0:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 1, self.name, pos)
if bad: raise PFARuntimeException("contains non-finite value", self.errcodeBase + 2, self.name, pos)
return rowVectorToMap(np().dot(xmat, ymat), rows)
provide(Dot())
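# In the map cases above, row/column keys are unioned and missing entries
# default to 0.0, so the result does not depend on key order.  For example,
# with x = {"r": {"a": 1.0, "b": 2.0}} and y = {"a": 3.0}, the matrix-vector
# branch returns {"r": 3.0} (the "b" column multiplies an implicit 0.0).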
class Transpose(LibFcn):
name = prefix + "transpose"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Map(P.Double()))}], P.Map(P.Map(P.Double())))])
errcodeBase = 24060
def __call__(self, state, scope, pos, paramTypes, x):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x):
rows = len(x)
if rows < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
cols = len(x[0])
if cols < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
if raggedArray(x):
raise PFARuntimeException("ragged columns", self.errcodeBase + 1, self.name, pos)
return [[x[r][c] for r in xrange(rows)] for c in xrange(cols)]
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()):
rows = rowKeys(x)
cols = colKeys(x)
if len(rows) < 1 or len(cols) < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
if raggedMap(x):
raise PFARuntimeException("ragged columns", self.errcodeBase + 1, self.name, pos)
return dict((c, dict((r, x[r][c]) for r in rows)) for c in cols)
provide(Transpose())
class Inverse(LibFcn):
name = prefix + "inverse"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Map(P.Double()))}], P.Map(P.Map(P.Double())))])
errcodeBase = 24070
def __call__(self, state, scope, pos, paramTypes, x):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x):
rows = len(x)
if rows < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
cols = len(x[0])
if cols < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
if raggedArray(x):
raise PFARuntimeException("ragged columns", self.errcodeBase + 1, self.name, pos)
return matrixToArrays(arraysToMatrix(x).I)
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()):
rows = list(rowKeys(x))
cols = list(colKeys(x))
if len(rows) < 1 or len(cols) < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
xmat = mapsToMatrix(x, rows, cols)
return matrixToMaps(xmat.I, cols, rows)
provide(Inverse())
class Trace(LibFcn):
name = prefix + "trace"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}], P.Double()),
Sig([{"x": P.Map(P.Map(P.Double()))}], P.Double())])
errcodeBase = 24080
def __call__(self, state, scope, pos, paramTypes, x):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x):
rows = len(x)
if rows == 0:
return 0.0
else:
cols = len(x[0])
if raggedArray(x):
raise PFARuntimeException("ragged columns", self.errcodeBase + 0, self.name, pos)
return sum(x[i][i] for i in xrange(min(rows, cols)))
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()):
keys = rowKeys(x).intersection(colKeys(x))
return sum(x[i][i] for i in keys)
provide(Trace())
class Det(LibFcn):
name = prefix + "det"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}], P.Double()),
Sig([{"x": P.Map(P.Map(P.Double()))}], P.Double())])
errcodeBase = 24090
def __call__(self, state, scope, pos, paramTypes, x):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x):
rows = len(x)
if rows < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
cols = len(x[0])
if cols < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
if raggedArray(x):
raise PFARuntimeException("ragged columns", self.errcodeBase + 1, self.name, pos)
if rows != cols:
raise PFARuntimeException("non-square matrix", self.errcodeBase + 2, self.name, pos)
if any(any(math.isnan(z) or math.isinf(z) for z in row) for row in x):
return float("nan")
else:
return float(np().linalg.det(arraysToMatrix(x)))
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()):
keys = list(rowKeys(x).union(colKeys(x)))
if len(keys) < 1 or all(len(row) == 0 for row in x.values()):
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
if any(any(math.isnan(z) or math.isinf(z) for z in row.values()) for row in x.values()):
return float("nan")
else:
return float(np().linalg.det(mapsToMatrix(x, keys, keys)))
provide(Det())
class Symmetric(LibFcn):
name = prefix + "symmetric"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}, {"tol": P.Double()}], P.Boolean()),
Sig([{"x": P.Map(P.Map(P.Double()))}, {"tol": P.Double()}], P.Boolean())])
errcodeBase = 24100
@staticmethod
def same(x, y, tol):
if math.isinf(x) and math.isinf(y) and ((x > 0.0 and y > 0.0) or (x < 0.0 and y < 0.0)):
return True
elif math.isnan(x) and math.isnan(y):
return True
elif not math.isinf(x) and not math.isnan(x) and not math.isinf(y) and not math.isnan(y):
return abs(x - y) < tol
else:
return False
def __call__(self, state, scope, pos, paramTypes, x, tol):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x):
rows = len(x)
if rows < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
cols = len(x[0])
if cols < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
if raggedArray(x):
raise PFARuntimeException("ragged columns", self.errcodeBase + 1, self.name, pos)
if rows != cols:
raise PFARuntimeException("non-square matrix", self.errcodeBase + 2, self.name, pos)
return all(all(self.same(x[i][j], x[j][i], tol) for j in xrange(cols)) for i in xrange(rows))
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()):
keys = list(rowKeys(x).union(colKeys(x)))
if len(keys) < 1 or all(len(row) == 0 for row in x.values()):
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
return all(all(self.same(x.get(i, {}).get(j, 0.0), x.get(j, {}).get(i, 0.0), tol) for j in keys) for i in keys)
provide(Symmetric())
class EigenBasis(LibFcn):
name = prefix + "eigenBasis"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Map(P.Double()))}], P.Map(P.Map(P.Double())))])
errcodeBase = 24110
def calculate(self, x, size):
symm = (x + x.T) * 0.5
evals, evects = np().linalg.eig(symm)
evects = np().array(evects)
evects2 = [evects[:,i] * (-1.0 if evects[0,i] < 0.0 else 1.0) for i in xrange(size)]
eigvalm2 = [div(1.0, math.sqrt(abs(ei))) for ei in evals]
order = np().argsort(eigvalm2)
out = np().empty((size, size), dtype=np().double)
for i in xrange(size):
for j in xrange(size):
out[i,j] = evects2[order[i]][j] * eigvalm2[order[i]]
return out
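    # In effect, calculate() builds a whitening basis: each row of the result
    # is an eigenvector of the symmetrized input scaled by 1/sqrt(|eigenvalue|),
    # ordered from largest to smallest eigenvalue magnitude, so projecting data
    # onto these rows yields approximately unit-variance coordinates.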
def __call__(self, state, scope, pos, paramTypes, x):
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x):
rows = len(x)
if rows < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
cols = len(x[0])
if cols < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
if raggedArray(x):
raise PFARuntimeException("ragged columns", self.errcodeBase + 1, self.name, pos)
if rows != cols:
raise PFARuntimeException("non-square matrix", self.errcodeBase + 2, self.name, pos)
if any(any(math.isnan(z) or math.isinf(z) for z in row) for row in x):
raise PFARuntimeException("non-finite matrix", self.errcodeBase + 3, self.name, pos)
return matrixToArrays(self.calculate(arraysToMatrix(x), rows))
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()):
keys = list(rowKeys(x).union(colKeys(x)))
if len(keys) < 1 or all(len(z) == 0 for z in x.values()):
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
if any(any(math.isnan(z) or math.isinf(z) for z in row.values()) for row in x.values()):
raise PFARuntimeException("non-finite matrix", self.errcodeBase + 3, self.name, pos)
return matrixToMaps(self.calculate(mapsToMatrix(x, keys, keys), len(keys)), map(str, xrange(len(keys))), keys)
provide(EigenBasis())
class Truncate(LibFcn):
name = prefix + "truncate"
sig = Sigs([Sig([{"x": P.Array(P.Array(P.Double()))}, {"keep": P.Int()}], P.Array(P.Array(P.Double()))),
Sig([{"x": P.Map(P.Map(P.Double()))}, {"keep": P.Array(P.String())}], P.Map(P.Map(P.Double())))])
errcodeBase = 24120
def __call__(self, state, scope, pos, paramTypes, x, keep):
if keep < 0:
keep = 0
if isinstance(x, (list, tuple)) and all(isinstance(xi, (list, tuple)) for xi in x):
rows = len(x)
if rows < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
cols = len(x[0])
if cols < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
if raggedArray(x):
raise PFARuntimeException("ragged columns", self.errcodeBase + 1, self.name, pos)
return x[:keep]
elif isinstance(x, dict) and all(isinstance(x[i], dict) for i in x.keys()):
rows = rowKeys(x)
cols = colKeys(x)
if len(rows) < 1 or len(cols) < 1:
raise PFARuntimeException("too few rows/cols", self.errcodeBase + 0, self.name, pos)
return dict((k, x[k]) for k in rows if k in keep)
provide(Truncate())
```
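The array form of `la.dot` dispatches on `paramTypes[1]`, so a direct call must supply `y`'s Avro type alongside the data. A minimal sketch (illustrative, assuming `numpy` is available for the `np()` helper):

```python
from titus.lib.la import Dot

dot = Dot()
# The Avro type of y selects the matrix-matrix branch.
y_type = {"type": "array", "items": {"type": "array", "items": "double"}}
result = dot(None, None, None, [None, y_type],
             [[1.0, 2.0], [3.0, 4.0]],
             [[5.0, 6.0], [7.0, 8.0]])
assert result == [[19.0, 22.0], [43.0, 50.0]]
```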
#### File: lib/prob/dist.py
```python
import math
from titus.fcn import Fcn
from titus.fcn import LibFcn
from titus.signature import Sig
from titus.signature import Sigs
from titus.datatype import *
from titus.errors import *
import titus.P as P
# import special functions required to compute distributions
from titus.lib.spec import logBetaFunction
from titus.lib.spec import incompleteBetaFunction
from titus.lib.spec import inverseIncompleteBetaFunction
from titus.lib.spec import regularizedGammaQ
from titus.lib.spec import regularizedGammaP
from titus.lib.spec import nChooseK
provides = {}
def provide(fcn):
provides[fcn.name] = fcn
prefix = "prob.dist."
class GaussianLL(LibFcn):
name = prefix + "gaussianLL"
sig = Sigs([Sig([{"x": P.Double()}, {"mu": P.Double()}, {"sigma": P.Double()}], P.Double()),
Sig([{"x": P.Double()}, {"params": P.WildRecord("A", {"mean": P.Double(), "variance": P.Double()})}], P.Double())])
errcodeBase = 13000
def __call__(self, state, scope, pos, paramTypes, x, *others):
if len(others) == 2:
mu, sigma = others
else:
mu = others[0]["mean"]
if math.isnan(others[0]["variance"]) or others[0]["variance"] < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
else:
sigma = math.sqrt(others[0]["variance"])
if math.isinf(mu) or math.isnan(mu) or math.isinf(sigma) or math.isnan(sigma) or sigma < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif sigma == 0.0:
if x != mu:
return float("-inf")
else:
return float("inf")
else:
return GaussianDistribution(mu, sigma, self.errcodeBase + 0, self.name, pos).LL(x)
provide(GaussianLL())
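# GaussianDistribution is defined further down this module (not shown in this
# excerpt); its LL is the standard Gaussian log-density,
# LL(x) = -log(sigma * sqrt(2*pi)) - (x - mu)**2 / (2 * sigma**2).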
class GaussianCDF(LibFcn):
name = prefix + "gaussianCDF"
sig = Sigs([Sig([{"x": P.Double()}, {"mu": P.Double()}, {"sigma": P.Double()}], P.Double()),
Sig([{"x": P.Double()}, {"params": P.WildRecord("A", {"mean": P.Double(), "variance": P.Double()})}], P.Double())])
errcodeBase = 13010
def __call__(self, state, scope, pos, paramTypes, x, *others):
if len(others) == 2:
mu, sigma = others
else:
mu = others[0]["mean"]
if math.isnan(others[0]["variance"]) or others[0]["variance"] < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
else:
sigma = math.sqrt(others[0]["variance"])
if math.isinf(mu) or math.isnan(mu) or math.isinf(sigma) or math.isnan(sigma) or sigma < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif sigma == 0.0:
if x < mu:
return 0.0
else:
return 1.0
else:
return GaussianDistribution(mu, sigma, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(GaussianCDF())
# written using http://www.johndcook.com/normal_cdf_inverse.html
class GaussianQF(LibFcn):
name = prefix + "gaussianQF"
sig = Sigs([Sig([{"p": P.Double()}, {"mu": P.Double()}, {"sigma": P.Double()}], P.Double()),
Sig([{"p": P.Double()}, {"params": P.WildRecord("A", {"mean": P.Double(), "variance": P.Double()})}], P.Double())])
errcodeBase = 13020
def __call__(self, state, scope, pos, paramTypes, p, *others):
if len(others) == 2:
mu, sigma = others
else:
mu = others[0]["mean"]
if math.isnan(others[0]["variance"]) or others[0]["variance"] < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
else:
sigma = math.sqrt(others[0]["variance"])
if math.isinf(mu) or math.isnan(mu) or math.isinf(sigma) or math.isnan(sigma) or sigma < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1.0:
return float("inf")
elif p == 0.0:
return float("-inf")
elif sigma == 0.0:
return mu
else:
return GaussianDistribution(mu, sigma, self.errcodeBase + 0, self.name, pos).QF(p)
provide(GaussianQF())
################ Exponential
class ExponentialPDF(LibFcn):
name = prefix + "exponentialPDF"
sig = Sig([{"x": P.Double()}, {"lambda": P.Double()}], P.Double())
errcodeBase = 13030
def __call__(self, state, scope, pos, paramTypes, x, rate):
if math.isinf(rate) or math.isnan(rate) or rate < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif rate == 0.0:
return 0.0
elif x < 0.0:
return 0.0
elif x == 0.0:
return rate
else:
return ExponentialDistribution(rate, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(ExponentialPDF())
class ExponentialCDF(LibFcn):
name = prefix + "exponentialCDF"
sig = Sig([{"x": P.Double()}, {"lambda": P.Double()}], P.Double())
errcodeBase = 13040
def __call__(self, state, scope, pos, paramTypes, x, rate):
if math.isinf(rate) or math.isnan(rate) or rate < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif rate == 0.0 or x == 0.0:
return 0.0
elif x <= 0.0:
return 0.0
else:
return ExponentialDistribution(rate, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(ExponentialCDF())
class ExponentialQF(LibFcn):
name = prefix + "exponentialQF"
sig = Sig([{"p": P.Double()}, {"lambda": P.Double()}], P.Double())
errcodeBase = 13050
def __call__(self, state, scope, pos, paramTypes, p, rate):
if math.isinf(rate) or math.isnan(rate) or rate < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif rate == 0.0 and p == 0.0:
return 0.0
elif rate == 0.0 and p > 0:
return float("inf")
elif p == 1.0:
return float("inf")
elif p == 0.0:
return 0.0
else:
return ExponentialDistribution(rate, self.errcodeBase + 0, self.name, pos).QF(p)
provide(ExponentialQF())
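# ExponentialDistribution's QF has the standard closed form
# QF(p) = -ln(1 - p) / lambda; the branches above short-circuit the
# degenerate cases (rate == 0, p == 0, p == 1) before reaching it.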
################ Chi2
class Chi2PDF(LibFcn):
name = prefix + "chi2PDF"
sig = Sig([{"x": P.Double()}, {"dof": P.Int()}], P.Double())
errcodeBase = 13060
def __call__(self, state, scope, pos, paramTypes, x, df):
if df < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif df == 0:
if x != 0:
return 0.0
else:
return float("inf")
elif x <= 0.0:
return 0.0
else:
return Chi2Distribution(df, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(Chi2PDF())
class Chi2CDF(LibFcn):
name = prefix + "chi2CDF"
sig = Sig([{"x": P.Double()}, {"dof": P.Int()}], P.Double())
errcodeBase = 13070
def __call__(self, state, scope, pos, paramTypes, x, df):
if df < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif df == 0:
if x > 0:
return 1.0
else:
return 0.0
else:
return Chi2Distribution(df, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(Chi2CDF())
class Chi2QF(LibFcn):
name = prefix + "chi2QF"
sig = Sig([{"p": P.Double()}, {"dof": P.Int()}], P.Double())
errcodeBase = 13080
def __call__(self, state, scope, pos, paramTypes, p, df):
if df < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1.0:
return float("inf")
elif df == 0:
return 0.0
elif p == 0.0:
return 0.0
else:
return Chi2Distribution(df, self.errcodeBase + 0, self.name, pos).QF(p)
provide(Chi2QF())
################ Poisson
class PoissonPDF(LibFcn):
name = prefix + "poissonPDF"
sig = Sig([{"x": P.Int()}, {"lambda": P.Double()}], P.Double())
errcodeBase = 13090
def __call__(self, state, scope, pos, paramTypes, x, lamda):
if math.isinf(lamda) or math.isnan(lamda) or lamda < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif lamda == 0:
if x != 0:
return 0.0
else:
return 1.0
elif x < 0:
return 0.0
else:
return PoissonDistribution(lamda, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(PoissonPDF())
class PoissonCDF(LibFcn):
name = prefix + "poissonCDF"
sig = Sig([{"x": P.Int()}, {"lambda": P.Double()}], P.Double())
errcodeBase = 13100
def __call__(self, state, scope, pos, paramTypes, x, lamda):
if math.isinf(lamda) or math.isnan(lamda) or lamda < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif lamda == 0:
if x >= 0:
return 1.0
else:
return 0.0
else:
return PoissonDistribution(lamda, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(PoissonCDF())
class PoissonQF(LibFcn):
name = prefix + "poissonQF"
sig = Sig([{"p": P.Double()}, {"lambda": P.Double()}], P.Double())
errcodeBase = 13110
def __call__(self, state, scope, pos, paramTypes, p, lamda):
if math.isinf(lamda) or math.isnan(lamda) or lamda < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif lamda == 0:
return 0.0
elif p == 1:
return float("inf")
elif p == 0:
return 0.0
else:
return PoissonDistribution(lamda, self.errcodeBase + 0, self.name, pos).QF(p)
provide(PoissonQF())
################ Gamma
class GammaPDF(LibFcn):
name = prefix + "gammaPDF"
sig = Sig([{"x": P.Double()}, {"shape": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13120
def __call__(self, state, scope, pos, paramTypes, x, shape, scale):
if math.isinf(shape) or math.isnan(shape) or math.isinf(scale) or math.isnan(scale) or shape < 0 or scale < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif shape == 0 or scale == 0:
if x != 0:
return 0.0
else:
return float("inf")
elif x < 0:
return 0.0
elif x == 0:
if shape < 1:
return float("inf")
elif shape == 1:
return 1.0/scale
else:
return 0.0
else:
return GammaDistribution(shape, scale, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(GammaPDF())
class GammaCDF(LibFcn):
name = prefix + "gammaCDF"
sig = Sig([{"x": P.Double()}, {"shape": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13130
def __call__(self, state, scope, pos, paramTypes, x, shape, scale):
if math.isinf(shape) or math.isnan(shape) or math.isinf(scale) or math.isnan(scale) or shape < 0 or scale < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif shape == 0 or scale == 0:
if x != 0:
return 1.0
else:
return 0.0
elif x < 0:
return 0.0
else:
return GammaDistribution(shape, scale, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(GammaCDF())
class GammaQF(LibFcn):
name = prefix + "gammaQF"
sig = Sig([{"p": P.Double()}, {"shape": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13140
def __call__(self, state, scope, pos, paramTypes, p, shape, scale):
if math.isinf(shape) or math.isnan(shape) or math.isinf(scale) or math.isnan(scale) or shape <= 0 or scale <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1.0:
return float("inf")
elif p == 0.0:
return 0.0
else:
return GammaDistribution(shape, scale, self.errcodeBase + 0, self.name, pos).QF(p)
provide(GammaQF())
################ Beta
class BetaPDF(LibFcn):
name = prefix + "betaPDF"
sig = Sig([{"x": P.Double()}, {"a": P.Double()}, {"b": P.Double()}], P.Double())
errcodeBase = 13150
def __call__(self, state, scope, pos, paramTypes, x, shape1, shape2):
if math.isinf(shape1) or math.isnan(shape1) or math.isinf(shape2) or math.isnan(shape2) or shape1 <= 0 or shape2 <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif x <= 0 or x >= 1:
return 0.0
else:
return BetaDistribution(shape1, shape2, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(BetaPDF())
class BetaCDF(LibFcn):
name = prefix + "betaCDF"
sig = Sig([{"x": P.Double()}, {"a": P.Double()}, {"b": P.Double()}], P.Double())
errcodeBase = 13160
def __call__(self, state, scope, pos, paramTypes, x, shape1, shape2):
if math.isinf(shape1) or math.isnan(shape1) or math.isinf(shape2) or math.isnan(shape2) or shape1 <= 0 or shape2 <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif x <= 0:
return 0.0
elif x >= 1:
return 1.0
else:
return BetaDistribution(shape1, shape2, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(BetaCDF())
class BetaQF(LibFcn):
name = prefix + "betaQF"
sig = Sig([{"p": P.Double()}, {"a": P.Double()}, {"b": P.Double()}], P.Double())
errcodeBase = 13170
def __call__(self, state, scope, pos, paramTypes, p, shape1, shape2):
if math.isinf(shape1) or math.isnan(shape1) or math.isinf(shape2) or math.isnan(shape2) or shape1 <= 0 or shape2 <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1:
return 1.0
elif p == 0:
return 0.0
else:
return BetaDistribution(shape1, shape2, self.errcodeBase + 0, self.name, pos).QF(p)
provide(BetaQF())
################ Cauchy
class CauchyPDF(LibFcn):
name = prefix + "cauchyPDF"
sig = Sig([{"x": P.Double()}, {"location": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13180
def __call__(self, state, scope, pos, paramTypes, x, location, scale):
if math.isinf(location) or math.isnan(location) or math.isinf(scale) or math.isnan(scale) or scale <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return CauchyDistribution(location, scale, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(CauchyPDF())
class CauchyCDF(LibFcn):
name = prefix + "cauchyCDF"
sig = Sig([{"x": P.Double()}, {"location": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13190
def __call__(self, state, scope, pos, paramTypes, x, location, scale):
if math.isinf(location) or math.isnan(location) or math.isinf(scale) or math.isnan(scale) or scale <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return CauchyDistribution(location, scale, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(CauchyCDF())
class CauchyQF(LibFcn):
name = prefix + "cauchyQF"
sig = Sig([{"p": P.Double()}, {"location": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13200
def __call__(self, state, scope, pos, paramTypes, p, location, scale):
if math.isinf(location) or math.isnan(location) or math.isinf(scale) or math.isnan(scale) or scale <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1:
return float("inf")
elif p == 0:
return float("-inf")
else:
return CauchyDistribution(location, scale, self.errcodeBase + 0, self.name, pos).QF(p)
provide(CauchyQF())
################ F
class FPDF(LibFcn):
name = prefix + "fPDF"
sig = Sig([{"x": P.Double()}, {"d1": P.Int()}, {"d2": P.Int()}], P.Double())
errcodeBase = 13210
def __call__(self, state, scope, pos, paramTypes, x, d1, d2):
if d2 <= 0 or d1 <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        elif x < 0:
return 0.0
else:
return FDistribution(d1, d2, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(FPDF())
class FCDF(LibFcn):
name = prefix + "fCDF"
sig = Sig([{"x": P.Double()}, {"d1": P.Int()}, {"d2": P.Int()}], P.Double())
errcodeBase = 13220
def __call__(self, state, scope, pos, paramTypes, x, d1, d2):
if d2 <= 0 or d1 <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return FDistribution(d1, d2, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(FCDF())
class FQF(LibFcn):
name = prefix + "fQF"
sig = Sig([{"p": P.Double()}, {"d1": P.Int()}, {"d2": P.Int()}], P.Double())
errcodeBase = 13230
def __call__(self, state, scope, pos, paramTypes, p, d1, d2):
if d1 <= 0 or d2 <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1:
return float("inf")
else:
return FDistribution(d1, d2, self.errcodeBase + 0, self.name, pos).QF(p)
provide(FQF())
################ Lognormal
class LognormalPDF(LibFcn):
name = prefix + "lognormalPDF"
sig = Sig([{"x": P.Double()}, {"meanlog": P.Double()}, {"sdlog": P.Double()}], P.Double())
errcodeBase = 13240
def __call__(self, state, scope, pos, paramTypes, x, meanlog, sdlog):
if math.isinf(meanlog) or math.isnan(meanlog) or math.isinf(sdlog) or math.isnan(sdlog) or sdlog <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return LognormalDistribution(meanlog, sdlog, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(LognormalPDF())
class LognormalCDF(LibFcn):
name = prefix + "lognormalCDF"
sig = Sig([{"x": P.Double()}, {"meanlog": P.Double()}, {"sdlog": P.Double()}], P.Double())
errcodeBase = 13250
def __call__(self, state, scope, pos, paramTypes, x, meanlog, sdlog):
if math.isinf(meanlog) or math.isnan(meanlog) or math.isinf(sdlog) or math.isnan(sdlog) or sdlog <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return LognormalDistribution(meanlog, sdlog, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(LognormalCDF())
class LognormalQF(LibFcn):
name = prefix + "lognormalQF"
sig = Sig([{"p": P.Double()}, {"meanlog": P.Double()}, {"sdlog": P.Double()}], P.Double())
errcodeBase = 13260
def __call__(self, state, scope, pos, paramTypes, p, meanlog, sdlog):
if math.isinf(meanlog) or math.isnan(meanlog) or math.isinf(sdlog) or math.isnan(sdlog) or sdlog <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1:
return float("inf")
else:
return LognormalDistribution(meanlog, sdlog, self.errcodeBase + 0, self.name, pos).QF(p)
provide(LognormalQF())
################ T
class TPDF(LibFcn):
name = prefix + "tPDF"
sig = Sig([{"x": P.Double()}, {"dof": P.Int()}], P.Double())
errcodeBase = 13270
def __call__(self, state, scope, pos, paramTypes, x, df):
if df <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return TDistribution(df, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(TPDF())
class TCDF(LibFcn):
name = prefix + "tCDF"
sig = Sig([{"x": P.Double()}, {"dof": P.Int()}], P.Double())
errcodeBase = 13280
def __call__(self, state, scope, pos, paramTypes, x, df):
if df <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return TDistribution(df, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(TCDF())
class TQF(LibFcn):
name = prefix + "tQF"
sig = Sig([{"p": P.Double()}, {"dof": P.Int()}], P.Double())
errcodeBase = 13290
def __call__(self, state, scope, pos, paramTypes, p, df):
if df <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1:
return float("inf")
elif p == 0:
return float("-inf")
else:
return TDistribution(df, self.errcodeBase + 0, self.name, pos).QF(p)
provide(TQF())
################ Binomial
class BinomialPDF(LibFcn):
name = prefix + "binomialPDF"
sig = Sig([{"x": P.Int()}, {"size": P.Int()}, {"prob": P.Double()}], P.Double())
errcodeBase = 13300
def __call__(self, state, scope, pos, paramTypes, x, size, prob):
if math.isinf(prob) or math.isnan(prob) or size <= 0 or prob < 0 or prob > 1:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif x < 0:
return 0.0
        elif x > size:
return 0.0
else:
return BinomialDistribution(size, prob, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(BinomialPDF())
class BinomialCDF(LibFcn):
name = prefix + "binomialCDF"
sig = Sig([{"x": P.Double()}, {"size": P.Int()}, {"prob": P.Double()}], P.Double())
errcodeBase = 13310
def __call__(self, state, scope, pos, paramTypes, x, size, prob):
if math.isinf(prob) or math.isnan(prob) or size <= 0 or prob < 0 or prob > 1:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif x < 0:
return 0.0
elif x >= size:
return 1.0
elif prob == 1:
if x < size:
return 0.0
else:
return 1.0
elif prob == 0:
return 1.0
else:
return BinomialDistribution(size, prob, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(BinomialCDF())
class BinomialQF(LibFcn):
name = prefix + "binomialQF"
sig = Sig([{"p": P.Double()}, {"size": P.Int()}, {"prob": P.Double()}], P.Double())
errcodeBase = 13320
def __call__(self, state, scope, pos, paramTypes, p, size, prob):
if math.isinf(prob) or math.isnan(prob) or size <= 0 or prob < 0 or prob > 1:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1:
return float(size)
elif p == 0:
return 0.0
else:
return BinomialDistribution(size, prob, self.errcodeBase + 0, self.name, pos).QF(p)
provide(BinomialQF())
################ Uniform
class UniformPDF(LibFcn):
name = prefix + "uniformPDF"
sig = Sig([{"x": P.Double()}, {"min": P.Double()}, {"max": P.Double()}], P.Double())
errcodeBase = 13330
def __call__(self, state, scope, pos, paramTypes, x, min, max):
if math.isinf(min) or math.isnan(min) or math.isinf(max) or math.isnan(max) or min >= max:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
return UniformDistribution(min, max, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(UniformPDF())
class UniformCDF(LibFcn):
name = prefix + "uniformCDF"
sig = Sig([{"x": P.Double()}, {"min": P.Double()}, {"max": P.Double()}], P.Double())
errcodeBase = 13340
def __call__(self, state, scope, pos, paramTypes, x, min, max):
if math.isinf(min) or math.isnan(min) or math.isinf(max) or math.isnan(max) or min >= max:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
return UniformDistribution(min, max, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(UniformCDF())
class UniformQF(LibFcn):
name = prefix + "uniformQF"
sig = Sig([{"p": P.Double()}, {"min": P.Double()}, {"max": P.Double()}], P.Double())
errcodeBase = 13350
def __call__(self, state, scope, pos, paramTypes, p, min, max):
if math.isinf(min) or math.isnan(min) or math.isinf(max) or math.isnan(max) or min >= max:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return UniformDistribution(min, max, self.errcodeBase + 0, self.name, pos).QF(p)
provide(UniformQF())
################ Geometric
class GeometricPDF(LibFcn):
name = prefix + "geometricPDF"
sig = Sig([{"x": P.Int()}, {"prob": P.Double()}], P.Double())
errcodeBase = 13360
def __call__(self, state, scope, pos, paramTypes, x, prob):
if math.isinf(prob) or math.isnan(prob) or prob <= 0 or prob > 1:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return GeometricDistribution(prob, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(GeometricPDF())
class GeometricCDF(LibFcn):
name = prefix + "geometricCDF"
sig = Sig([{"x": P.Double()}, {"prob": P.Double()}], P.Double())
errcodeBase = 13370
def __call__(self, state, scope, pos, paramTypes, x, prob):
if math.isinf(prob) or math.isnan(prob) or prob <= 0 or prob > 1:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return GeometricDistribution(prob, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(GeometricCDF())
class GeometricQF(LibFcn):
name = prefix + "geometricQF"
sig = Sig([{"p": P.Double()}, {"prob": P.Double()}], P.Double())
errcodeBase = 13380
def __call__(self, state, scope, pos, paramTypes, p, prob):
if math.isinf(prob) or math.isnan(prob) or prob <= 0 or prob > 1:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 1:
return float("inf")
else:
return GeometricDistribution(prob, self.errcodeBase + 0, self.name, pos).QF(p)
provide(GeometricQF())
################ Hypergeometric
class HypergeometricPDF(LibFcn):
name = prefix + "hypergeometricPDF"
sig = Sig([{"x": P.Int()}, {"m": P.Int()}, {"n": P.Int()}, {"k": P.Int()}], P.Double())
errcodeBase = 13390
def __call__(self, state, scope, pos, paramTypes, x, m, n, k):
if m + n < k or m < 0 or n <= 0 or m + n == 0 or k < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif x > m:
return 0.0
else:
return HypergeometricDistribution(m, n, k, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(HypergeometricPDF())
class HypergeometricCDF(LibFcn):
name = prefix + "hypergeometricCDF"
sig = Sig([{"x": P.Int()}, {"m": P.Int()}, {"n": P.Int()}, {"k": P.Int()}], P.Double())
errcodeBase = 13400
def __call__(self, state, scope, pos, paramTypes, x, m, n, k):
if m + n < k or m < 0 or n <= 0 or m + n == 0 or k < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        elif x > m:
            return 1.0
else:
return HypergeometricDistribution(m, n, k, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(HypergeometricCDF())
class HypergeometricQF(LibFcn):
name = prefix + "hypergeometricQF"
sig = Sig([{"p": P.Double()}, {"m": P.Int()}, {"n": P.Int()}, {"k": P.Int()}], P.Double())
errcodeBase = 13410
def __call__(self, state, scope, pos, paramTypes, p, m, n, k):
if m + n < k or m < 0 or n <= 0 or m + n == 0 or k < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return HypergeometricDistribution(m, n, k, self.errcodeBase + 0, self.name, pos).QF(p)
provide(HypergeometricQF())
################ Weibull
class WeibullPDF(LibFcn):
name = prefix + "weibullPDF"
sig = Sig([{"x": P.Double()}, {"shape": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13420
def __call__(self, state, scope, pos, paramTypes, x, shape, scale):
if math.isinf(shape) or math.isnan(shape) or math.isinf(scale) or math.isnan(scale) or shape <= 0 or scale <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif x >= 0:
return WeibullDistribution(shape, scale, self.errcodeBase + 0, self.name, pos).PDF(x)
else:
return 0.0
provide(WeibullPDF())
class WeibullCDF(LibFcn):
name = prefix + "weibullCDF"
sig = Sig([{"x": P.Double()}, {"shape": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13430
def __call__(self, state, scope, pos, paramTypes, x, shape, scale):
if math.isinf(shape) or math.isnan(shape) or math.isinf(scale) or math.isnan(scale) or shape <= 0 or scale <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif x < 0:
return 0.0
else:
return WeibullDistribution(shape, scale, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(WeibullCDF())
class WeibullQF(LibFcn):
name = prefix + "weibullQF"
sig = Sig([{"p": P.Double()}, {"shape": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13440
def __call__(self, state, scope, pos, paramTypes, p, shape, scale):
if math.isinf(shape) or math.isnan(shape) or math.isinf(scale) or math.isnan(scale) or shape <= 0 or scale <= 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
else:
return WeibullDistribution(shape, scale, self.errcodeBase + 0, self.name, pos).QF(p)
provide(WeibullQF())
################ NegativeBinomial
class NegativeBinomialPDF(LibFcn):
name = prefix + "negativeBinomialPDF"
sig = Sig([{"x": P.Int()}, {"size": P.Int()}, {"prob": P.Double()}], P.Double())
errcodeBase = 13450
def __call__(self, state, scope, pos, paramTypes, x, size, prob):
if math.isinf(prob) or math.isnan(prob) or prob > 1 or prob <= 0 or size < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif x == 0 and size == 0:
return 1.0
elif size == 0:
return 0.0
else:
return NegativeBinomialDistribution(size, prob, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(NegativeBinomialPDF())
class NegativeBinomialCDF(LibFcn):
name = prefix + "negativeBinomialCDF"
sig = Sig([{"x": P.Double()}, {"size": P.Int()}, {"prob": P.Double()}], P.Double())
errcodeBase = 13460
def __call__(self, state, scope, pos, paramTypes, x, size, prob):
if math.isinf(prob) or math.isnan(prob) or prob > 1 or prob <= 0 or size < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif math.isinf(x) or math.isnan(x):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif x == 0 and size == 0:
return 1.0
elif size == 0:
return 0.0
elif x < 0:
return 0.0
else:
return NegativeBinomialDistribution(size, prob, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(NegativeBinomialCDF())
class NegativeBinomialQF(LibFcn):
name = prefix + "negativeBinomialQF"
sig = Sig([{"p": P.Double()}, {"size": P.Int()}, {"prob": P.Double()}], P.Double())
errcodeBase = 13470
def __call__(self, state, scope, pos, paramTypes, p, size, prob):
if math.isinf(prob) or math.isnan(prob) or prob <= 0 or prob > 1 or size < 0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
elif not (0.0 <= p <= 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
elif p == 0 and size == 0:
return 0.0
elif size == 0:
return 0.0
elif p == 1:
return float("inf")
else:
return NegativeBinomialDistribution(size, prob, self.errcodeBase + 0, self.name, pos).QF(p)
provide(NegativeBinomialQF())
#########################################################################################
##### The actual distribution functions #################################################
#########################################################################################
################### Gaussian
class GaussianDistribution(object):
    def __init__(self, mu, sigma, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
if sigma < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
self.mu = mu
self.sigma = sigma
def LL(self, x):
if (self.sigma == 0.0) and (x == self.mu):
return float("inf")
elif (self.sigma == 0.0) and (x != self.mu):
return float("-inf")
else:
term1 = -(x - self.mu)**2/(2.0 * self.sigma**2)
term2 = math.log(self.sigma * math.sqrt(2.0 * math.pi))
return term1 - term2
def CDF(self, x):
if (self.sigma == 0.0) and (x < self.mu):
return 0.0
elif (self.sigma == 0.0) and (x >= self.mu):
return 1.0
else:
return 0.5 * (1.0 + math.erf((x - self.mu)/(self.sigma * math.sqrt(2.0))))
def QF(self, p):
if (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return float("-inf")
elif (self.sigma == 0.0):
return self.mu
else:
# http://www.johnkerl.org/python/sp_funcs_m.py.txt
c0 = 2.515517
c1 = 0.802853
c2 = 0.010328
d1 = 1.432788
d2 = 0.189269
d3 = 0.001308
sign = -1.0
if (p > 0.5):
sign = 1.0
p = 1.0 - p
arg = -2.0*math.log(p)
t = math.sqrt(arg)
g = t - (c0 + t*(c1 + t*c2)) / (1.0 + t*(d1 + t*(d2 + t*d3)))
standard_normal_qf = sign*g
return self.mu + self.sigma*standard_normal_qf
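# Note: the rational approximation in GaussianDistribution.QF above is
# Abramowitz & Stegun formula 26.2.23, accurate to about 4.5e-4 in the
# standard-normal quantile. Illustrative check (the error code 13000 and
# function name below are placeholders, not from the original source):
#   GaussianDistribution(0.0, 1.0, 13000, "prob.dist.gaussianQF", None).QF(0.975)
# returns approximately 1.96.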
################### Exponential
class ExponentialDistribution(object):
    def __init__(self, rate, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.rate = rate
if (self.rate < 0.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if (self.rate == 0.0):
return 0.0
elif (x < 0.0):
return 0.0
elif (x == 0.0):
return self.rate
else:
return self.rate*math.exp(-self.rate*x)
def CDF(self, x):
if (self.rate == 0.0) or (x == 0.0):
return 0.0
elif (x <= 0.0):
return 0.0
else:
return 1.0 - math.exp(-self.rate*x)
def QF(self, p):
if (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (self.rate == 0.0) and (p == 0.0):
return 0.0
elif (self.rate == 0.0) and (p > 0):
return float("inf")
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return 0.0
else:
return -math.log(1.0 - p)/self.rate
################### Chi2
# from: http://www.stat.tamu.edu/~jnewton/604/chap3.pdf
class Chi2Distribution(object):
    def __init__(self, DOF, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.maxIter = 200
self.epsilon = 1e-15
self.DOF = DOF
if (self.DOF < 0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self,x):
if (self.DOF == 0) and (x != 0.0):
return 0.0
elif (self.DOF == 0) and (x == 0.0):
return float("inf")
elif (x == 0.0) and (self.DOF < 2):
return float("inf")
elif (x < 0.0):
return 0.0
else:
            return GammaDistribution(self.DOF/2.0, 2.0, self.errcodeBase, self.name, self.pos).PDF(x)
def CDF(self,x):
if math.isnan(x):
return float("nan")
elif (self.DOF == 0) and (x > 0.0):
return 1.0
elif (self.DOF == 0) and (x <= 0.0):
return 0.0
elif (x <= 0.0):
return 0.0
else:
            return GammaDistribution(self.DOF/2.0, 2.0, self.errcodeBase, self.name, self.pos).CDF(x)
def QF(self,p):
if (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (self.DOF == 0):
return 0.0
elif (p == 0.0):
return 0.0
else:
            return GammaDistribution(self.DOF/2.0, 2.0, self.errcodeBase, self.name, self.pos).QF(p)
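# Note: the reductions above use the identity that a chi-square distribution
# with k degrees of freedom is a gamma distribution with shape k/2 and scale 2;
# e.g. Chi2Distribution with DOF=4 has the same CDF as
# GammaDistribution(2.0, 2.0, ...).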
################### Poisson
class PoissonDistribution(object):
    def __init__(self, lamda, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.lamda = float(lamda)
if (self.lamda < 0.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if (self.lamda == 0.0):
if (x != 0.0):
return 0.0
else:
return 1.0
elif (x < 0.0):
return 0.0
else:
return math.exp(x * math.log(self.lamda) - self.lamda - math.lgamma(x + 1.0))
def CDF(self, x):
if math.isnan(x):
return float("nan")
elif (self.lamda == 0.0):
if (x >= 0.0):
return 1.0
else:
return 0.0
elif (x >= 0.0):
return regularizedGammaQ(math.floor(x + 1.0), self.lamda)
else:
return 0.0
def QF(self, p):
if math.isnan(p):
return float("nan")
elif (self.lamda == 0.0):
return 0.0
elif (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return 0.0
else:
# step through CDFs until we find the right one
x = 0
p0 = 0.0
while (p0 <= p):
p0 = self.CDF(x)
x += 1
return x - 1
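# Note: the quantile loop above walks the CDF upward until it first exceeds p
# and returns the previous count. Illustrative trace (values approximate): for
# lamda=3 and p=0.5, the CDF at 0, 1, 2, 3 is about 0.050, 0.199, 0.423,
# 0.647, so the loop exits after evaluating x=3 and returns 3, the median of
# Poisson(3).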
################### Gamma
class GammaDistribution(object):
    def __init__(self, shape, scale, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
# alpha: shape parameter
self.alpha = shape
# beta: scale parameter
self.beta = scale
self.epsilon = 1e-15
self.maxIter = 800
if (self.alpha < 0.0) or (self.beta < 0.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if (self.alpha == 0.0) or (self.beta == 0.0):
if (x != 0.0):
return 0.0
else:
return float("inf")
elif (x < 0.0):
return 0.0
elif (self.alpha == 1.0) and (x == 0.0):
            return 1.0/self.beta
else:
try:
term1a = math.log(x/self.beta) * (self.alpha - 1.0)
except ValueError:
term1a = float("-inf") * (self.alpha - 1.0)
term1 = term1a - math.log(self.beta)
term2 = -x/self.beta
term3 = math.lgamma(self.alpha)
return math.exp(term1 + (term2 - term3))
def CDF(self, x):
if (self.alpha == 0.0) or (self.beta == 0.0):
if (x != 0.0):
return 1.0
else:
return 0.0
elif (x <= 0.0):
return 0.0
else:
return regularizedGammaP(self.alpha, x/self.beta)
def QF(self,p):
if (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return 0.0
else:
y = self.alpha*self.beta
y_old = y
for i in range(0,100):
h = (self.CDF(y_old) - p)/self.PDF(y_old)
if y_old - h <= 0.0:
y_new = y_old / 2.0
else:
y_new = y_old - h
if abs(y_new) <= self.epsilon:
y_new = y_old/10.0
h = y_old - y_new
if abs(h) < math.sqrt(self.epsilon):
break
y_old = y_new
return y_new
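# Note: the quantile above is a guarded Newton iteration on CDF(y) - p = 0:
# each step computes y_new = y_old - (CDF(y_old) - p)/PDF(y_old), seeded at
# the distribution mean alpha*beta, halving toward zero whenever a full step
# would go nonpositive, and stopping once the step size drops below
# sqrt(epsilon).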
################### Beta
class BetaDistribution(object):
    def __init__(self, alpha, beta, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
# first shape parameter
self.alpha = alpha
# second shape parameter
self.beta = beta
if (self.alpha <= 0.0) or (self.beta <= 0.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
# normalization factor
self.Z = math.lgamma(self.alpha) + math.lgamma(self.beta) \
- math.lgamma(self.alpha + self.beta)
# tolerance
self.epsilon = 1e-15
# max Iterations
self.maxIter = 1000
def PDF(self,x):
if (x <= 0.0) or (x >= 1.0):
return 0.0
else:
logX = math.log(x)
log1mX = math.log1p(-x)
ret = math.exp((self.alpha - 1.0) * logX + (self.beta - 1.0) \
* log1mX - self.Z)
return ret
def CDF(self,x):
if math.isnan(x):
return float("nan")
elif (x <= 0.0):
return 0.0
elif (x >= 1.0):
return 1.0
else:
return incompleteBetaFunction(x,self.alpha,self.beta)
def QF(self,p):
if math.isnan(p):
return float("nan")
elif (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return 1.0
elif (p == 0.0):
return 0.0
else:
return inverseIncompleteBetaFunction(p,self.alpha,self.beta)
################### Cauchy
class CauchyDistribution(object):
    def __init__(self, location, scale, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.loc = location
self.s = scale
if self.s <= 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
term1 = 1.0/(math.pi*self.s)
term2 = 1.0/(1 + pow((x - self.loc)/self.s,2))
return term1 * term2
def CDF(self, x):
return 0.5 + math.atan2(x-self.loc, self.s)*(1.0/math.pi)
def QF(self, p):
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return float("-inf")
else:
return self.loc + self.s*math.tan(math.pi*(p - 0.5))
################### F
# from: http://www.stat.tamu.edu/~jnewton/604/chap3.pdf
class FDistribution(object):
    def __init__(self, upperDOF, lowerDOF, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.maxIter = 1000
self.epsilon = 1e-8
self.d1 = float(upperDOF)
self.d2 = float(lowerDOF)
if (self.d1 <= 0.0) or (self.d2 <= 0.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self,x):
        if (x < 0.0):
            return 0.0
        elif (x == 0.0) and (self.d1 < 2.0):
            return float("inf")
        elif (x == 0.0) and (self.d1 == 2.0):
            return 1.0
        elif (x == 0.0):
            return 0.0
else:
num_arg1 = pow(self.d1/self.d2, self.d1/2.0)
num_arg2 = pow(x, (self.d1/2.0)-1.0)
den_arg1 = math.exp(logBetaFunction(self.d1/2.0, self.d2/2.0))
den_arg2 = pow((1.0 + (self.d1*x)/self.d2), (self.d1 + self.d2)/2.0)
return (num_arg1*num_arg2)/(den_arg1*den_arg2)
def CDF(self,x):
if math.isnan(x):
return float("nan")
elif x <= 0.0:
return 0.0
else:
arg1 = (self.d1*x)/(self.d1*x + self.d2)
arg2 = self.d1/2.0
arg3 = self.d2/2.0
return incompleteBetaFunction(arg1, arg2, arg3)
def QF(self, p):
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return 0.0
else:
low = 0.0
high = 1.0
while self.CDF(high) < p:
high *= 2.0
diff = None
while diff is None or abs(diff) > self.epsilon:
mid = (low + high) / 2.0
diff = self.CDF(mid) - p
if diff > 0:
high = mid
else:
low = mid
return mid
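# Note: the quantile above is solved by bracketing and bisection: the upper
# bound is doubled until CDF(high) >= p, then the bracket is halved until the
# CDF at the midpoint is within epsilon (1e-8) of p. The lognormal and
# Student's t quantiles below reuse the same scheme.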
################### Lognormal
class LognormalDistribution(object):
    def __init__(self, meanlog, sdlog, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.mu = meanlog
self.sigma = sdlog
self.epsilon = 1e-8
self.maxIter = 100
if self.sigma <= 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if x <= 0.0:
return 0.0
else:
term1 = 1.0/(x*self.sigma*math.sqrt(2.0*math.pi))
term2 = pow(math.log(x) - self.mu, 2.0)/(2.0*pow(self.sigma, 2.0))
return term1 * math.exp(-term2)
def CDF(self, x):
if x <= 0.0:
return 0.0
else:
            return GaussianDistribution(0.0, 1.0, self.errcodeBase, self.name, self.pos).CDF((math.log(x) - self.mu)/self.sigma)
def QF(self, p):
if math.isnan(p):
return float("nan")
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 0.0):
return 0.0
elif (p == 1.0):
return float("inf")
else:
low = 0.0
high = 1.0
while self.CDF(high) < p:
high *= 2.0
diff = None
while diff is None or abs(diff) > self.epsilon:
mid = (low + high) / 2.0
diff = self.CDF(mid) - p
if diff > 0:
high = mid
else:
low = mid
return mid
# # Using Newton-Raphson algorithm
# if p <= .001:
# self.epsilon = 1e-5
# p1 = p
# if (p1 > 0.8) and (p1 < 0.9):
# p2 = .5
# else:
# p2 = 0.85
# counter = 0
# while (abs(p1 - p2) > self.epsilon) and (counter < self.maxIter):
# q2 = (self.CDF(p2) - p)
# p1 = p2
# p2 = p1 - (q2/self.PDF(p1))
# counter += 1
# return p2
################### Student's T
class TDistribution(object):
    def __init__(self, DOF, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.df = float(DOF)
self.epsilon = 1e-8
self.maxIter = 800
if self.df <= 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
term1 = 1.0/(math.sqrt(self.df) * math.exp(logBetaFunction(0.5, self.df/2.0)))
term2 = pow(1.0 + (x*x/self.df), -(self.df + 1.0)/2.0)
return term1 * term2
def CDF(self, x):
if math.isnan(x):
return float("nan")
arg1 = self.df/(self.df + x*x)
arg2 = self.df/2.0
arg3 = 0.5
if (x > 0):
return 1.0 - 0.5*incompleteBetaFunction(arg1, arg2, arg3)
elif (x == 0.0):
return 0.5
else:
return 0.5*incompleteBetaFunction(arg1, arg2, arg3)
def QF(self, p):
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
elif (p == 0.0):
return float("-inf")
else:
low = -1.0
high = 1.0
while self.CDF(low) > p:
low *= 2.0
while self.CDF(high) < p:
high *= 2.0
diff = None
while diff is None or abs(diff) > self.epsilon:
mid = (low + high) / 2.0
diff = self.CDF(mid) - p
if diff > 0:
high = mid
else:
low = mid
return mid
# # Using Newton-Raphson algorithm
# if p <= .001:
# self.epsilon = 1e-5
# p1 = p
# if (p1 > 0.8) and (p1 < 0.9):
# p2 = .5
# else:
# p2 = 0.85
# counter = 0
# while (abs(p1 - p2) > self.epsilon) or (counter < self.maxIter):
# q2 = (self.CDF(p2) - p)
# p1 = p2
# p2 = p1 - (q2/self.PDF(p1))
# counter += 1
# return p2
################### Binomial
class BinomialDistribution(object):
    def __init__(self, size, p_success, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.prob = p_success
self.n = float(size)
if self.n < 0.0:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
elif (self.prob < 0.0) or (self.prob > 1.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if x < 0.0:
return 0.0
else:
if x == 0:
nchoosek = 1.0
elif (x < 0) or (x > self.n):
nchoosek = 0.0
else:
nchoosek = nChooseK(self.n, x)
return nchoosek * pow(self.prob, x) * pow(1.0 - self.prob, self.n - x)
def CDF(self, x):
if math.isnan(x):
return float("nan")
elif x < 0.0:
return 0.0
else:
if (self.n - x <= 0.0) or (self.prob == 0.0):
return 1.0
else:
x = math.floor(x)
return incompleteBetaFunction(1.0 - self.prob, self.n - x, 1.0 + x)
def QF(self, p):
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif math.isnan(p):
return float("nan")
elif (p == 1.0):
return self.n
elif (p == 0.0):
return 0.0
elif (p > 0.0) and (p < 1.0):
# step through CDFs until we find the right one
x = 0
p0 = 0.0
while (p0 < p):
p0 = p0 + self.PDF(x)
x += 1
return x - 1
else:
return 0.0
################### Uniform
class UniformDistribution(object):
    def __init__(self, minimum, maximum, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.mi = minimum
self.ma = maximum
if self.mi >= self.ma:
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if (x < self.mi) or (x > self.ma):
return 0.0
elif (math.isnan(x)):
return float("nan")
else:
return 1.0/(self.ma - self.mi)
def CDF(self, x):
if (x < self.mi):
return 0.0
elif (x > self.ma):
return 1.0
else:
return (x - self.mi)/(self.ma - self.mi)
def QF(self, p):
        if math.isnan(p):
            return float("nan")
        elif (p > 1.0) or (p < 0.0):
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
        else:
            return self.mi + p*(self.ma - self.mi)
################### Geometric
class GeometricDistribution(object):
    def __init__(self, p_success, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.prob = p_success
if (self.prob < 0.0) or (self.prob > 1.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if (x < 0.0):
return 0.0
else:
return self.prob*pow(1.0 - self.prob, x)
def CDF(self, x):
if (x < 0.0):
return 0.0
else:
x = math.floor(x)
return 1.0 - pow(1.0 - self.prob, x + 1.0)
def QF(self, p):
if (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (math.isnan(p)):
return float("nan")
elif (p == 1.0):
return float("inf")
elif (p > 0.0) and (p < 1.0):
if self.prob == 1.0:
return 0.0
else:
return math.floor(math.log(1.0 - p)/math.log(1.0 - self.prob))
else:
return 0.0
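# Note: this parameterization counts failures before the first success, so
# PDF(x) = prob*(1-prob)**x and CDF(x) = 1 - (1-prob)**(floor(x)+1); the
# quantile simply inverts that CDF in closed form.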
################### Hypergeometric
class HypergeometricDistribution(object):
    def __init__(self, n_white, n_black, n_drawn, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.n_white = n_white
self.n_black = n_black
self.n_drawn = n_drawn
if (n_white + n_black < n_drawn):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x): # x is number of white balls drawn
# compute nchoosek(n_white,x)
if x == 0:
nchoosek1 = 1.0
        elif (x < 0) or (x > self.n_white):
nchoosek1 = 0.0
else:
nchoosek1 = nChooseK(self.n_white, x)
# compute nchoosek(n_black, n_drawn - x)
if (self.n_drawn - x == 0):
nchoosek2 = 1.0
elif (self.n_drawn - x < 0) or (self.n_drawn - x > self.n_black):
nchoosek2 = 0.0
else:
nchoosek2 = nChooseK(self.n_black, self.n_drawn - x)
# compute nchoosek(n_white + n_black, n_drawn)
if self.n_drawn == 0:
nchoosek3 = 1.0
elif (self.n_drawn < 0) or (self.n_drawn > self.n_white + self.n_black):
nchoosek3 = 0.0
else:
nchoosek3 = nChooseK(self.n_white + self.n_black, self.n_drawn)
# compute
return nchoosek1 * nchoosek2 / nchoosek3
def CDF(self, x):
        if (x > self.n_white):
            return 1.0
else:
val = 0.0
for i in range(0, int(math.floor(x + 1.0))):
val = val + self.PDF(float(i))
return val
def QF(self, p):
if (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif math.isnan(p):
return float("nan")
elif (p == 1):
return self.n_drawn
else:
# step through CDFs until we find the right one
x = 0
p0 = 0.0
while (p0 <= p):
p0 = p0 + self.PDF(x)
x += 1
return x - 1
################### Weibull
class WeibullDistribution(object):
    def __init__(self, shape, scale, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.a = float(shape)
self.b = float(scale)
if (self.a <= 0.0) or (self.b <= 0.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if math.isnan(x):
return float("nan")
elif x == 0.0:
            if self.a < 1.0:
                return float("inf")
elif self.a == 1.0:
return 1.0/self.b
else:
return 0.0
elif x >= 0.0:
term1 = (self.a/self.b)
term2 = pow(x/self.b, self.a - 1.0)
term3 = math.exp(-pow(x/self.b, self.a))
return term1 * term2 * term3
else:
return 0.0
def CDF(self, x):
if math.isnan(x):
return float("nan")
elif x >= 0.0:
return 1.0 - math.exp(-pow(x/self.b, self.a))
else:
return 0.0
def QF(self, p):
if (p < 0.0) or (p > 1.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif (p == 1.0):
return float("inf")
else:
try:
return self.b * pow(-math.log(1.0 - p), 1.0/self.a)
except OverflowError:
return float("inf")
################### NegativeBinomial
class NegativeBinomialDistribution(object):
def __init__(self, n, p, errcodeBase, name, pos):
self.name = name
self.errcodeBase = errcodeBase
self.pos = pos
self.n = n
self.prob = p
if (p > 1.0) or (p <= 0.0) or (self.n < 0.0):
raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, self.pos)
def PDF(self, x):
if (math.isnan(x)):
return float("nan")
elif (self.n == 0.0) and (x == 0.0):
return 1.0
elif (self.n == 0.0) and (x != 0.0):
return 0.0
elif (self.prob == 1.0) and (x != 0.0):
return 0.0
elif (self.prob == 1.0) and (x == 0.0):
return 1.0
elif (x >= 0.0):
if x == 0:
val = 1.0
elif x < 0:
val = 0.0
else:
val = nChooseK(self.n + x - 1.0, x)
return val * pow(1.0 - self.prob, x) * pow(self.prob, self.n)
else:
return 0.0
def CDF(self, x):
if math.isnan(x):
return float("nan")
elif (self.n == 0.0) and (x == 0.0):
return 1.0
val = 0.0
for i in range(0, int(math.floor(x + 1.0))):
val = val + self.PDF(float(i))
return val
def QF(self, p):
# CDF SEEMS MORE ACCURATE NOW, REVISIT
if math.isnan(p):
return float("nan")
        elif (self.n == 0.0):
            return 0.0
elif (p == 0.0):
return 0.0
elif (p == 1.0):
return float("inf")
elif (p > 1.0) or (p < 0.0):
raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, self.pos)
elif self.prob == 1.0:
return 0.0
        else:
# using Cornish-Fisher expansion
Q = 1.0/self.prob
P = (1.0 - self.prob)*Q
mu = self.n * (1.0 - self.prob)/self.prob
sigma = math.sqrt(self.n * P * Q)
gamma = (Q + P)/sigma
            z = GaussianDistribution(0.0, 1.0, self.errcodeBase, self.name, self.pos).QF(p)
# CF approximation gets us close
w = math.ceil(mu + sigma * (z + gamma * (z*z - 1.0)/6.0))
            # use math.ceil because the next term in the expansion has a minus sign.
            # Newton-Raphson is not an option here: the step-through CDF below is
            # not accurate enough for extreme values, so for very large quantiles
            # the Cornish-Fisher approximation is used by itself.
if w > 100000:
return w
else: # do the step-through-CDF method
x = 0.0
s = 0.0
while (s <= p):
s = s + self.PDF(x)
x += 1
return x - 1
```
#### File: titus/producer/kmeans.py
```python
import math
import random
from collections import OrderedDict
import numpy
from titus.producer.transformation import Transformation
import titus.prettypfa
### metrics are nested objects like Euclidean(AbsDiff()), and they may
### be user-defined. They mirror the metrics and similarity functions
### available in PFA.
### interfaces
def _NotImplementedError():
raise NotImplementedError
class Similarity(object):
"""Trait for similarity functions in Numpy and PFA (compare two scalars, return a non-negative number)."""
def __init__(self):
self.calculate = lambda dataset, cluster: _NotImplementedError()
def pfa(self):
raise NotImplementedError
class Metric(object):
"""Trait for metric functions in Numpy and PFA (compare two vectors, return a non-negative number)."""
def __init__(self):
self.calculate = lambda dataset, cluster: _NotImplementedError()
def pfa(self):
raise NotImplementedError
### similarity
class AbsDiff(Similarity):
"""Absolute difference similarity function for Numpy and PFA."""
def __init__(self):
self.calculate = lambda dataset, cluster: numpy.absolute(dataset - cluster)
def pfa(self):
return {"fcn": "metric.absDiff"}
class GaussianSimilarity(Similarity):
"""Gaussian similarity function for Numpy and PFA."""
def __init__(self, sigma):
self.calculate = lambda dataset, cluster: numpy.exp(-numpy.log(2) * numpy.square(dataset - cluster) / sigma**2)
self.sigma = sigma
def pfa(self):
x = "similarityX"
y = "similarityY"
return {"params": [{x: "double"}, {y: "double"}],
"ret": "double",
"do": {"metric.gaussianSimilarity": [x, y, self.sigma]}}
### metrics
class Euclidean(Metric):
"""Euclidean metric for Numpy and PFA."""
def __init__(self, similarity):
self.calculate = lambda dataset, cluster: numpy.sqrt(numpy.sum(numpy.square(similarity.calculate(dataset, cluster)), axis=1))
self.similarity = similarity
def pfa(self, x, y):
return {"metric.euclidean": [self.similarity.pfa(), x, y]}
class SquaredEuclidean(Metric):
"""Squared euclidean metric for Numpy and PFA."""
def __init__(self, similarity):
self.calculate = lambda dataset, cluster: numpy.sum(numpy.square(similarity.calculate(dataset, cluster)), axis=1)
self.similarity = similarity
def pfa(self, x, y):
x = "metricX"
y = "metricY"
return {"metric.squaredEuclidean": [self.similarity.pfa(), x, y]}
class Chebyshev(Metric):
"""Chebyshev (maximum) metric for Numpy and PFA."""
def __init__(self, similarity):
self.calculate = lambda dataset, cluster: numpy.max(similarity.calculate(dataset, cluster), axis=1)
self.similarity = similarity
def pfa(self, x, y):
x = "metricX"
y = "metricY"
return {"metric.chebyshev": [self.similarity.pfa(), x, y]}
class Taxicab(Metric):
"""Taxicab (sum) metric for Numpy and PFA."""
def __init__(self, similarity):
self.calculate = lambda dataset, cluster: numpy.sum(similarity.calculate(dataset, cluster), axis=1)
self.similarity = similarity
def pfa(self, x, y):
x = "metricX"
y = "metricY"
return {"metric.taxicab": [self.similarity.pfa(), x, y]}
class Minkowski(Metric):
"""Minkowski metric for Numpy and PFA."""
def __init__(self, similarity, p):
        self.calculate = lambda dataset, cluster: numpy.power(numpy.sum(numpy.power(similarity.calculate(dataset, cluster), p), axis=1), 1.0/p)
self.similarity = similarity
self.p = p
def pfa(self, x, y):
x = "metricX"
y = "metricY"
return {"metric.minkowski": [self.similarity.pfa(), x, y, self.p]}
### stopping conditions are functions that take iterationNumber (int),
# corrections (Python list of Numpy arrays), datasetSize (int) and
# return bool (continue iterating if True)
### they may be user-defined or constructed from these functions like
# whileall(printChange("6.4f"), halfChange(0.001), clusterJumped()) to
# print iteration data and stop once at least half of the clusters change
# by less than 0.001 or any cluster jumps
def printValue(format="g"):
"""Generates a "stopping condition" that prints the current value and never stops.
:type format: string
:param format: format string ("g" is general number, "8.3f" is 8-characters wide, 3-digits after the decimal floating point, etc.)
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
state = {"ready": False}
def out(iterationNumber, corrections, values, datasetSize):
if not state["ready"]:
state["j"] = "{:5s} (jump)"
for v in values:
if v is not None:
state["n"] = "{0:5s}" + "".join((" {%d:%s}" % (i + 1, format)) for i in xrange(len(v)))
break
if "n" in state:
state["ready"] = True
print "iter values"
print "----------------------------------"
for index, v in enumerate(values):
if index == 0:
it = repr(iterationNumber)
else:
it = ""
if v is None:
print state["j"].format(it)
else:
print state["n"].format(it, *v)
return True
return out
def printChange(format="g"):
"""Generates a "stopping condition" that prints changes in values and never stops.
:type format: string
:param format: format string ("g" is general number, "8.3f" is 8-characters wide, 3-digits after the decimal floating point, etc.)
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
state = {"ready": False}
def out(iterationNumber, corrections, values, datasetSize):
if not state["ready"]:
state["j"] = "{:5s} (jump)"
for corr in corrections:
if corr is not None:
state["n"] = "{0:5s}" + "".join((" {%d:%s}" % (i + 1, format)) for i in xrange(len(corr)))
break
if "n" in state:
state["ready"] = True
print "iter changes"
print "----------------------------------"
for index, corr in enumerate(corrections):
if index == 0:
it = repr(iterationNumber)
else:
it = ""
if corr is None:
print state["j"].format(it)
else:
print state["n"].format(it, *corr)
return True
return out
def clusterJumped():
"""Generates a stopping condition that stops if no clusters jumped (reset to a random point because of encounting nan).
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
return lambda iterationNumber, corrections, values, datasetSize: all(x is not None for x in corrections)
def maxIterations(number):
"""Generates a stopping condition that stops after a given number of iterations have passed.
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
return lambda iterationNumber, corrections, values, datasetSize: iterationNumber < number
def allChange(threshold):
"""Generates a stopping condition that stops if all cluster changes are less than a threshold.
:type threshold: number
:param threshold: maximum change allowed for all clusters
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
return lambda iterationNumber, corrections, values, datasetSize: not all((numpy.absolute(x) < threshold).all() for x in corrections if x is not None)
def halfChange(threshold):
"""Generates a stopping condition that stops if half of the cluster changes are less than a threshold.
:type threshold: number
:param threshold: maximum change allowed for half of the clusters
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
return lambda iterationNumber, corrections, values, datasetSize: numpy.sum([(numpy.absolute(x) < threshold).all() for x in corrections if x is not None], dtype=numpy.dtype(float)) / numpy.sum([x is not None for x in corrections], dtype=numpy.dtype(float)) < 0.5
def whileall(*conditions):
"""Generates a stopping condition that continues while all of its subconditions continue.
:type conditions: stopping condition functions
:param conditions: subconditions
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
return lambda *args: all(x(*args) for x in conditions)
def whileany(*conditions):
"""Generates a stopping condition that continues while any of its subconditions continue.
:type conditions: stopping condition functions
:param conditions: subconditions
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
return lambda *args: any(x(*args) for x in conditions)
def moving():
"""Generates a stopping condition that stops when all clusters change less than 1e-15 and none are jumping (reset to a random point because of encounting nan).
:type conditions: stopping condition functions
:param conditions: subconditions
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
return whileall(clusterJumped(), allChange(1e-15))
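### illustrative composition (hypothetical): to print per-iteration changes,
### cap the optimization at 100 iterations, and stop once every cluster moves
### by less than 1e-3, use
### whileall(printChange("8.3f"), maxIterations(100), allChange(1e-3))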
### the KMeans class
class KMeans(object):
"""Represents a k-means optimization by storing a dataset and performing all operations *in-place*.
Usually, you would construct the object, possibly stepup, then optimize and export to pfaDocument.
"""
def __init__(self, numberOfClusters, dataset, weights=None, metric=Euclidean(AbsDiff()), minPointsInCluster=None, maxPointsForClustering=None):
"""Construct a KMeans object, initializing cluster centers to unique, random points from the dataset.
:type numberOfClusters: positive integer
:param numberOfClusters: number of clusters (the "k" in k-means)
:type dataset: 2-d Numpy array
:param dataset: dataset to cluster; ``dataset.shape[0]`` is the number of records (rows), ``dataset.shape[1]`` is the number of dimensions for each point (columns)
:type weights: 1-d Numpy array or ``None``
:param weights: how much to weight each point in the ``dataset``: must have shape equal to ``(dataset.shape[0],)``; ``0`` means ignore the dataset, ``1`` means normal weight; ``None`` generates all ones
:type metric: titus.produce.kmeans.Metric
:param metric: metric for Numpy and PFA, such as ``Euclidean(AbsDiff())``
:type minPointsInCluster: non-negative integer or ``None``
:param minPointsInCluster: minimum number of points before jumping (replacing cluster with a random point during optimization)
:type maxPointsForClustering: positive integer or ``None``
:param maxPointsForClustering: maximum number of points in an optimization (if ``dataset.shape[0]`` exceeds this amount, a random subset is chosen)
"""
if len(dataset.shape) != 2:
raise TypeError("dataset must be two-dimensional: dataset.shape[0] is the number of records (rows), dataset.shape[1] is the number of dimensions (columns)")
self.dataset = dataset
if weights is not None and weights.shape != (dataset.shape[0],):
raise TypeError("weights must have as many records as the dataset and must be one dimensional")
self.weights = weights
self.metric = metric
try:
flattenedView = numpy.ascontiguousarray(self.dataset).view(numpy.dtype((numpy.void, self.dataset.dtype.itemsize * self.dataset.shape[1])))
_, indexes = numpy.unique(flattenedView, return_index=True)
self.uniques = self.dataset[indexes]
except TypeError:
self.uniques = self.dataset
if self.uniques.shape[0] <= numberOfClusters:
raise TypeError("the number of unique records in the dataset ({0} in this case) must be strictly greater than numberOfClusters ({1})".format(self.uniques.shape[0], numberOfClusters))
self.numberOfClusters = numberOfClusters
self.clusters = []
for index in xrange(numberOfClusters):
self.clusters.append(self.newCluster())
self.minPointsInCluster = minPointsInCluster
self.maxPointsForClustering = maxPointsForClustering
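    # Typical usage (a sketch, not from the original source; assumes a 2-d
    # Numpy array `dataset` and the pfaDocument export method defined later
    # in this file):
    #   kmeans = KMeans(5, dataset)
    #   kmeans.optimize(moving())
    #   doc = kmeans.pfaDocument("Cluster", ["one", "two", "three", "four", "five"])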
def randomPoint(self):
"""Pick a random point from the dataset.
:rtype: 1-d Numpy array
:return: a *copy* of a random point
"""
# make sure to copy it, so there are no hidden connections between dataset and clusters
return self.uniques[random.randint(0, self.uniques.shape[0] - 1),:].copy()
def newCluster(self):
"""Pick a random point from the dataset and ensure that it is different from all other cluster centers.
:rtype: 1-d Numpy array
:return: a *copy* of a random point, guaranteed to be different from all other clusters.
"""
newCluster = self.randomPoint()
while any(numpy.array_equal(x, newCluster) for x in self.clusters):
newCluster = self.randomPoint()
return newCluster
def randomSubset(self, subsetSize):
"""Return a (dataset, weights) that are randomly chosen to have ``subsetSize`` records.
:type subsetSize: positive integer
:param subsetSize: size of the sample
:rtype: (2-d Numpy array, 1-d Numpy array)
:return: (dataset, weights) sampled without replacement (if the original dataset is unique, the new one will be, too)
"""
if subsetSize <= self.numberOfClusters:
raise TypeError("subsetSize must be strictly greater than the numberOfClusters")
indexes = random.sample(xrange(self.dataset.shape[0]), subsetSize)
dataset = self.dataset[indexes,:]
if self.weights is None:
weights = None
else:
weights = self.weights[indexes]
return dataset, weights
def closestCluster(self, dataset=None, weights=None):
"""Identify the closest cluster to each element in the dataset.
:type dataset: 2-d Numpy array or ``None``
:param dataset: an input dataset or the built-in dataset if ``None`` is passed
:type weights: 1-d Numpy array or ``None``
:param weights: input weights or the built-in weights if ``None`` is passed
:rtype: 1-d Numpy array of integers
:return: the *indexes* of the closest cluster for each datum
"""
if dataset is None:
dataset = self.dataset
if weights is None:
weights = self.weights
# distanceToCenter is the result of applying the metric to each point in the dataset, for each cluster
# distanceToCenter.shape[0] is the number of records, distanceToCenter.shape[1] is the number of clusters
distanceToCenter = numpy.empty((dataset.shape[0], self.numberOfClusters), dtype=numpy.dtype(float))
for clusterIndex, cluster in enumerate(self.clusters):
distanceToCenter[:, clusterIndex] = self.metric.calculate(dataset, cluster)
# indexOfClosestCluster is the cluster classification for each point in the dataset
return numpy.argmin(distanceToCenter, axis=1)
def iterate(self, dataset, weights, iterationNumber, condition):
"""Perform one iteration step (in-place; modifies ``self.clusters``).
:type dataset: 2-d Numpy array
:param dataset: an input dataset
:type weights: 1-d Numpy array
:param weights: input weights
:type iterationNumber: non-negative integer
:param iterationNumber: the iteration number
:type condition: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:param condition: the stopping condition
:rtype: bool
:return: the result of the stopping condition
"""
indexOfClosestCluster = self.closestCluster(dataset, weights)
values = []
corrections = []
for clusterIndex, cluster in enumerate(self.clusters):
# select by cluster classification
selection = (indexOfClosestCluster == clusterIndex)
residuals = (dataset - cluster)[selection]
# weights scale the residuals
            if weights is not None:
                # reshape to a column vector so the per-record weights
                # broadcast across the feature axis of the 2-d residuals
                residuals *= weights[selection].reshape(-1, 1)
if self.minPointsInCluster is not None and numpy.sum(selection) < self.minPointsInCluster:
# too few points in this cluster; jump to a new random point
self.clusters[clusterIndex] = self.newCluster()
values.append(None)
corrections.append(None)
else:
# compute the mean of the displacements of points associated with this cluster
# (note that the similarity metric used here is the trivial one, possibly different from the classification metric)
correction = residuals.mean(axis=0)
numpy.add(cluster, correction, cluster)
if not numpy.isfinite(cluster).all():
# mean of a component is NaN or Inf, possibly because of zero points in the cluster
self.clusters[clusterIndex] = self.newCluster()
values.append(None)
corrections.append(None)
else:
# good step: correction is not None
values.append(cluster)
corrections.append(correction)
# call user-supplied test for continuation
return condition(iterationNumber, corrections, values, dataset.shape[0])
def stepup(self, condition, base=2):
"""Optimize the cluster set in successively larger subsets of the dataset. (This can be viewed as a cluster seeding technique.)
If randomly seeded, optimizing the whole dataset can be slow to converge: a long time per iteration times many iterations.
Optimizing a random subset takes as many iterations, but the time per iteration is short. However, the final cluster centers are only approximate.
Optimizing the whole dataset with approximate cluster starting points takes a long time per iteration but fewer iterations.
This procedure runs the k-means optimization technique on random subsets with exponentially increasing sizes from the smallest base**x that is larger than minPointsInCluster (or numberOfClusters) to the largest base**x that is a subset of the whole dataset.
:type condition: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:param condition: the stopping condition
:type base: integer greater than 1
:param base: the factor by which the subset size is increased after each convergence
:rtype: ``None``
:return: nothing; modifies cluster set in-place
"""
if self.minPointsInCluster is None:
minPointsInCluster = self.numberOfClusters
else:
minPointsInCluster = max(self.numberOfClusters, self.minPointsInCluster)
if self.maxPointsForClustering is None:
maxPointsForClustering = self.dataset.shape[0]
else:
maxPointsForClustering = self.maxPointsForClustering
trialSizes = [base**x for x in xrange(int(math.log(maxPointsForClustering, base)) + 1) if base**x > minPointsInCluster]
for trialSize in trialSizes:
dataset, weights = self.randomSubset(trialSize)
iterationNumber = 0
while self.iterate(dataset, weights, iterationNumber, condition):
iterationNumber += 1
def optimize(self, condition):
"""Run a standard k-means (Lloyd's algorithm) on the dataset, changing the clusters *in-place*.
:type condition: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:param condition: the stopping condition
:rtype: ``None``
:return: nothing; modifies cluster set in-place
"""
if self.maxPointsForClustering is None:
dataset, weights = self.dataset, self.weights
else:
dataset, weights = self.randomSubset(self.maxPointsForClustering)
iterationNumber = 0
while self.iterate(dataset, weights, iterationNumber, condition):
iterationNumber += 1
def centers(self, sort=True):
"""Get the cluster centers as a sorted Python list (canonical form).
:type sort: bool
:param sort: if ``True``, sort the centers for stable results
:rtype: list of list of numbers
:return: the cluster centers as Pythonized JSON
"""
if sort:
centers = sorted(map(list, self.clusters))
else:
centers = map(list, self.clusters)
return centers
def pfaType(self, clusterTypeName, idType="string", centerComponentType="double", populations=False):
"""Create a PFA type schema representing this cluster set.
:type clusterTypeName: string
:param clusterTypeName: name of the PFA record type
:type idType: Pythonized JSON
:param idType: subtype for the ``id`` field
:type centerComponentType: Pythonized JSON
:param centerComponentType: subtype for the center array items
:type populations: bool
:param populations: if ``True``, include the number of training points as a "population" field
:rtype: Pythonized JSON
:return: PFA type schema for an array of clusters
"""
fields = [{"name": "center", "type": {"type": "array", "items": centerComponentType}},
{"name": "id", "type": idType}]
if populations:
fields.append({"name": "population", "type": "int"})
return {"type": "array",
"items": {"type": "record",
"name": clusterTypeName,
"fields": fields}}
def pfaValue(self, ids, populations=False, sort=True):
"""Create a PFA data structure representing this cluster set.
:type ids: list of string
:param ids: names of the clusters
:type populations: bool
:param populations: if ``True``, include the number of training points as a "population" field
:type sort: bool
:param sort: if ``True``, sort the centers for stable results
:rtype: Pythonized JSON
:return: data structure that should be inserted in the ``init`` section of the cell or pool containing the clusters
"""
if len(ids) != self.numberOfClusters:
raise TypeError("ids should be a list with length equal to the number of clusters")
out = [{"center": x} for x in self.centers(sort=False)]
if populations:
indexOfClosestCluster = self.closestCluster(self.dataset, self.weights)
for clusterIndex in xrange(len(self.clusters)):
out[clusterIndex]["population"] = int(numpy.sum(indexOfClosestCluster == clusterIndex))
if sort:
indexes = [i for i, x in sorted(list(enumerate(self.clusters)), lambda a, b: cmp(list(a[1]), list(b[1])))]
out = list(numpy.array(out, dtype=object)[indexes])
for idi, cluster in zip(ids, out):
cluster["id"] = idi
return out
def pfaDocument(self, clusterTypeName, ids, populations=False, sort=True, preprocess=None, idType="string", dataComponentType="double", centerComponentType="double"):
"""Create a PFA document to score with this cluster set.
:type clusterTypeName: string
:param clusterTypeName: name of the PFA record type
:type ids: list of string
:param ids: names of the clusters
:type populations: bool
:param populations: if ``True``, include the number of training points as a "population" field
:type sort: bool
:param sort: if ``True``, sort the centers for stable results
:type preprocess: PrettyPFA substitution or ``None``
:param preprocess: pre-processing expression
:type idType: Pythonized JSON
:param idType: subtype for the ``id`` field
:type dataComponentType: Pythonized JSON
:param dataComponentType: subtype for the data array items
:type centerComponentType: Pythonized JSON
:param centerComponentType: subtype for the center array items
:rtype: Pythonized JSON
:return: a complete PFA document that performs clustering
"""
clusters = self.pfaValue(ids, populations, sort)
metric = self.metric.pfa("datum", "clusterCenter")
if preprocess is None:
preprocess = "input"
if populations:
            # field name must match the "population" key emitted by pfaValue
            populationsField = '''
                population: int,'''
else:
populationsField = ""
return titus.prettypfa.jsonNode('''
types:
ClusterType = record(<<clusterTypeName>>,
id: <<idType>>,{0}
center: array(<<centerComponentType>>))
input: array(<<dataComponentType>>)
output: <<idType>>
cells:
clusters(array(ClusterType)) = <<clusters>>
action:
model.cluster.closest(<<preprocess>>, clusters,
fcn(datum: array(<<dataComponentType>>),
clusterCenter: array(<<centerComponentType>>) -> double)
<<metric>>)["id"]
'''.format(populationsField), **vars())
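# Hedged usage sketch: the enclosing class name ("KMeans"), its argument
# order, and the metric are assumptions, since none of them appear in this
# excerpt. The condition callable is a *continuation* test (return True to
# keep iterating), matching the signature documented above.
#
# clusterer = KMeans(3, dataset, metric=someMetric)
#
# def condition(iterationNumber, corrections, values, datasetSize):
#     if any(c is None for c in corrections):
#         return True  # a cluster was reseeded this step; keep going
#     biggestMove = max(numpy.abs(c).max() for c in corrections)
#     return iterationNumber < 100 and biggestMove > 1e-6
#
# clusterer.optimize(condition)
# pfa = clusterer.pfaDocument("Cluster", ["one", "two", "three"])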
``` |
{
"source": "jmillerbrooks/capital_bikeshare",
"score": 2
} |
#### File: cabi/archived/ebikes.py
```python
import pandas as pd
import numpy as np
from shapely.geometry import Point
import geopandas as gpd
from cabi.utils import which_anc, station_anc_dict
from cabi.get_data import anc_gdf
gdf = anc_gdf()
anc_dict = station_anc_dict()
station_keys = anc_dict.keys()
# TODO: fix the get_data module so load_clean_dockless can call it directly
def load_clean_dockless():
    # TODO: read this through the get_data module instead of a hard-coded pickle
    df = pd.read_pickle('../data/wip/raw_dockless.pkl')
cleaned_ebikes = clean_frame(df)
cleaned_ebikes = cleaned_ebikes.drop('rideable_type', axis=1)
return cleaned_ebikes
def load_geo_ebikes():
df = load_clean_dockless()
geo_ebikes = to_geo(df)
return geo_ebikes
def load_clean_full():
"""DOCSTRING MAKE THIS EXTENSIBLE TO MORE MONTHS"""
df = pd.read_pickle('../data/wip/raw_apr_to_jul_df.pkl')
cleaned_full = clean_frame(df)
return cleaned_full
def geo_longer(df):
"""NEEDS DOCSTRING THIS FUNCTION MAKES ONE TIME COLUMN FROM START/END
AND DOUBLES THE LENGTH OF THE DF IN PROCESS, A GOOD TEST IS WHETHER OR NOT
THE LEN IS 2x OG DF"""
# List all the columns that are not start/end time for easy melt operation below
cols = list(df.columns)
cols.remove('started_at')
cols.remove('ended_at')
# Combine started_at/ended_at into one column 'time', indicating whether
# this was a trip start or trip end in another column, 'start_end',
# set index of new df to 'time'
# sort the index, so it makes sense as a time series
long_geo = df.rename(columns={'started_at': 'start', 'ended_at': 'end'}) \
.melt(id_vars=cols \
, value_vars=['start', 'end'] \
, var_name='start_end' \
, value_name='time') \
.set_index('time') \
.sort_index()
return long_geo
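# A minimal sanity check for geo_longer, as suggested in its docstring; the
# helper name is illustrative and not part of the original module.
def _assert_geo_longer_doubles(df):
    """Assert the long frame has exactly one start and one end row per trip."""
    long_df = geo_longer(df)
    assert len(long_df) == 2 * len(df), "geo_longer must double the row count"
    return long_df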
def load_long_geo():
"""DOCSTRING"""
df = load_geo_ebikes()
long_geo = geo_longer(df)
return long_geo
def load_long_geo_full():
"""DOCSTRING"""
df = load_geo_ebikes()
long_geo = geo_longer(df)
return long_geo
def anc_frame(df):
"""DOCSTRING"""
anc_df = df.drop(['start_station_name', 'end_station_name'], axis=1)
return anc_df
def load_long_anc():
"""DOCSTRING"""
df = load_long_geo()
anc_df = anc_frame(df)
return anc_df
# TODO: generalize to any location column (station etc.); likely unnecessary
# now that there is a more generalized long-df function
def net_gain_loss_anc(ANC_name, df):
"""NEEDS DOCSTRING THIS FUNCTION RETURNS A SERIES (list? np.array?) OF 1 0 -1 VALUES
1 if RIDE ENDED IN ANC 0 IF RIDE DID NOT LEAVE OR END IN ANC -1 IF RIDE LEFT FROM ANC"""
conditions = [
(df['start_end'] == 'start') & (df['ANC_start'] == ANC_name),
(df['start_end'] == 'end') & (df['ANC_end'] == ANC_name),
(df['ANC_start'] != ANC_name) & (df['ANC_end'] != ANC_name),
(df['start_end'] == 'end') & (df['ANC_end'] != ANC_name),
(df['start_end'] == 'start') & (df['ANC_start'] != ANC_name)
]
values = [
-1,
1,
0,
0,
0
]
return np.select(conditions, values)
def plus_minus_anc_frame(df):
"""DOCSTRING GENERALIZE THIS FUNCTION TO ACCEPT OTHER THINGS BESIDE ANC REMOVE DEPENDENCY ON GDF"""
# Create dictionary of ancs (keys) and series of plus minus values returned from net_gain_loss_anc (values)
# for each unique ANC_ID
plus_minus_dict = {anc: net_gain_loss_anc(anc, df) \
for anc in \
list(gdf.ANC_ID)}
# Convert dict to dataframe, index by the (time) index of long_anc_df passed
anc_plus_minus_df = pd.DataFrame(plus_minus_dict, index=df.index)
return anc_plus_minus_df
def load_plus_minus_anc():
df = load_long_anc()
plus_minus = plus_minus_anc_frame(df)
return plus_minus
```
#### File: cabi/etl/transform.py
```python
import pandas as pd
import numpy as np
from scipy.stats import mode
from functools import partial
import pmdarima.preprocessing as ppc
from statsmodels.tsa.deterministic import CalendarSeasonality
def net_gain_loss(location, df, col='ANC'):
"""Return an np.array of the effect of ride on a given column value's net gain or loss in 1 0 -1 VALUES
1 if RIDE ENDED IN location 0 IF RIDE DID NOT LEAVE OR END IN location -1 IF RIDE LEFT FROM location
will be the length of the df/col passed as params"""
conditions = [
(df['start_end'] == 'start') & (df[col] == location),
(df['start_end'] == 'end') & (df[col] == location),
(df[col] != location)
]
values = [
-1,
1,
0
]
return np.select(conditions, values)
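# Illustrative example (toy data, not from the original module):
# >>> toy = pd.DataFrame({'start_end': ['start', 'end', 'end'],
# ...                     'ANC': ['1A', '1A', '2B']})
# >>> net_gain_loss('1A', toy)
# array([-1,  1,  0])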
# Figure out how computationally expensive it would be to store all results including the exogenous columns
def to_plus_minus(df, col='ANC'):
"""DOCSTRING, accepts a location column, returns a df where the columns correspond to
the values of the col passed in params, values of each column are a timeseries of gain/loss
values one of (-1,0,1) yielded from net_gain_loss_location"""
# Build iterable of locations from unique values of a column, ignoring entries that are not in an ANC
# This is an area for future improvement, see for example map of rides that did not end in ANC but started
# Outside of DC, there is a clear edge effect, indicating that a better approach here would be to cluster
    # locations throughout the entire region rather than using the somewhat arbitrary ANC tiling
# used here for expediency to limit scope of the project
    locations = [
        location for location in df[col].unique() if location != 'Outside']
# Create dictionary of locations (keys) and series of plus minus values returned
# from net_gain_loss(location, df, col=col) for each unique location in locations
plus_minus_dict = {location: net_gain_loss(location, df, col)
for location in
locations}
# Convert dict to dataframe, index by the (time) index of long_anc_df passed
plus_minus_df = pd.DataFrame(plus_minus_dict, index=df.index)
return plus_minus_df
def cumulative_change(df, window_size):
"""DOCSTRING window_size must be an int or offset passable to pandas.DataFrame.rolling(window)
intended for use as an offset"""
rolling_df = df.rolling(window_size).sum()
return rolling_df
def series_to_interval(series, interval):
"""DOCSTRING take mode accross each one hour period if there are values, if no values, i.e. mode returns na like, presumed change is zero"""
regular = series.resample(
interval
).apply(
lambda x:
mode(x)[0] if mode(x)[0].size > 0
else np.nan
).interpolate('time')
return regular
def snap_to_interval(df, interval):
change_func = partial(series_to_interval, interval=interval)
return df.apply(change_func)
def get_seasonal_dummies(df):
"""Accepts a time-indexed df of hourly data, returns hourly and weekday dummies as a df
to passed as exogenous variables in a SARIMAX model"""
columns = df.columns
new_df = df.copy()
new_df['time'] = new_df.index
# create weekday dummy generator
wday_dumgen = ppc.DateFeaturizer(
column_name='time', with_day_of_month=False)
# since all have the same index, we can use any column in the df to generate the day_dums
_, wday_dums = wday_dumgen.fit_transform(new_df[columns[0]], new_df)
# drop the columns that aren't dummies
wday_dums = wday_dums[wday_dums.columns[-7:]]
# set the index for easy merging
wday_dums.set_index(new_df.index, inplace=True)
# create hourly dummy generator
hourly_dumgen = CalendarSeasonality('H', 'D')
# generate dummies
hourly_dums = hourly_dumgen.in_sample(new_df.index)
# merge results
full_dums = wday_dums.merge(hourly_dums, on='time')
return full_dums
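# Hedged usage sketch (the hourly frame, column name, and model order are
# assumptions, not from this module):
# exog = get_seasonal_dummies(hourly_df)
# model = SARIMAX(hourly_df['net_change'], exog=exog, order=(1, 0, 0))
# Passing the dummies as exogenous regressors is far cheaper than encoding
# weekly seasonality with a long seasonal order such as m=168.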
```
#### File: capital_bikeshare/cabi/model.py
```python
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error, mean_squared_log_error
from pmdarima.metrics import smape
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from statsmodels.tsa.statespace.sarimax import SARIMAX
import re
def persistence_model(series):
"""DOCSTRING
Wrapper for baseline persistence model"""
return [x for x in series]
# TODO: find a context manager or another way to capture stdout so this runs
# without Jupyter (get_cv_params parses stdout, but capturing it currently
# relies on %%capture)
def get_cv_params(std_text):
"""accepts stdout generated by cross-validate, returns the "Best Model" Objects
for each validated fold as a list of lists of ints
Example output for a cv with four folds and s=24 might be
[[2, 0, 3, 2, 1, 0, 24],
[2, 0, 1, 2, 1, 0, 24],
[3, 0, 3, 0, 1, 1, 24],
[1, 0, 0, 0, 1, 1, 24]]"""
# build a match pattern that matches a string that looks like
# this: 'Best model: ARIMA(1,0,0)(0,1,1)[24]' from full contents of stdout
# containing the ARIMA parameters from each fold in a cv search
mod_pattern = re.compile(r'Best.*24]')
# Match the pattern against stdout
pat_list = (re.findall(mod_pattern, std_text))
# Find all digits in each param in pat_list, save as list of params
params = [re.findall(r'\d', pat_list[j]) for j, _ in enumerate(pat_list)]
# this is a bit complicated: but does a fairly simple thing:
# converts params from a list of lists filled with digits as individual strings of len 1
# to a list of lists filled with digits as ints. since s=24 is specified in our AutoArima pipe
# the last two digits are always 2 and 4, we fix this by converting them both to 24 and
# removing the last digit, converting a list like ['2', '0', '3', '2', '1', '0', '2', '4']
# to for example [2, 0, 3, 2, 1, 0, 24], which we can feed as SARIMA params
params = [[int(param[p]) if (p < len(param) - 2) else 24 for p,
_ in enumerate(param)][:-1] for param in params]
return params
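# Worked example of the parsing above (the input text follows the assumed
# pmdarima trace format quoted in the docstring):
# >>> get_cv_params('Best model: ARIMA(1,0,0)(0,1,1)[24]')
# [[1, 0, 0, 0, 1, 1, 24]]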
def test_stationarity(series, print_vals=True):
"""Simple wrapper around adfuller that prints in more readable format
Adapted slightly from <NAME>'s machinelearningmastery.com
Params:
series (series) a timeseries
Returns:
adfuller test result"""
result = adfuller(series)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Critical Values:')
for key, value in result[4].items():
print('\t%s: %.3f' % (key, value))
return result
def RMSE(y_true, y_pred, last_only=True):
"""Simple wrapper function on mean_squared_error to return RMSE
Params:
predictions (series or array like object), the predicted values
from model test_data (series or array like object), the true
target values
Returns:
RMSE (list) list of accumulated RMSE values for each
observation in consecutive time order i.e. the first return
value will be just the error of first prediction, second the
sqrt of mean squared error for first 2 predictions, etc."""
rmse = None
# Ensure predictions and test_data are same size
if len(y_pred) != len(y_true):
rmse = "Test data and predictions must have equal length"
# If last_only is false, return the rmse for every point in the prediction set
# (useful for tracking where model went wrong)
elif last_only == False:
rmse = [mean_squared_error(
y_true[:i+1], y_pred[:i+1], squared=False) for i, _ in enumerate(y_pred)]
# Normal case: return the rmse value for the full prediction set
else:
rmse = mean_squared_error(y_true, y_pred, squared=False)
return rmse
def SMAPE(y_true, y_pred):
"""Wrapper aroumd smape from pmdarima.metrics
Returns value of smape on a 0-100% scale
instead of 0-200% for interpretability see
https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error
and https://alkaline-ml.com/pmdarima/modules/generated/pmdarima.metrics.smape.html
for reference"""
return smape(y_true, y_pred) / 2
def fit_sarimax(series, cfg=[(0, 1, 1), (0, 1, 1, 24), 'n'], test_start='2020-07-01 00:00:00'):
"""DOCSTRING accepts a series and SARIMAX configuration, returns model object, train/test sets"""
train = series[series.index < test_start]
test = series[test_start:]
model = SARIMAX(train, order=cfg[0], seasonal_order=cfg[1], trend=cfg[2])
return train, test, model
def sarima_configs(seasonal=[1, 24, 168]):
"""build configuration list to do light gridsearch of SARIMAX
models, function is from Jason Brownlee's website:
machinelearningmastery.com """
models = list()
# define config lists
p_params = [0, 1, 2]
d_params = [0, 1]
q_params = [0, 1, 2]
t_params = ['n', 'c', 't', 'ct']
P_params = [0, 1, 2]
D_params = [0, 1]
Q_params = [0, 1, 2]
m_params = seasonal
# create config instances
for p in p_params:
for d in d_params:
for q in q_params:
for t in t_params:
for P in P_params:
for D in D_params:
for Q in Q_params:
for m in m_params:
cfg = [(p, d, q), (P, D, Q, m), t]
models.append(cfg)
return models
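# The nested loops above are equivalent to this itertools sketch (same
# iteration order, same configs):
# from itertools import product
# models = [[(p, d, q), (P, D, Q, m), t]
#           for p, d, q, t, P, D, Q, m in product(
#               [0, 1, 2], [0, 1], [0, 1, 2], ['n', 'c', 't', 'ct'],
#               [0, 1, 2], [0, 1], [0, 1, 2], seasonal)]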
def SARIMAX_error(series, p=10, d=2, q=2):
"""Simple wrapper that fits SARIMAX model and returns RMSE (raw and
pct) for the predictions, confidence interval, start of forecast
and end of actual values"""
X = series
# set trainset to include all but last 48 months (4 years)
# only training on data between 9-4 years ago
train_size = int(len(X) - 48)
train, test = X[-108:train_size], X[train_size:]
model = SARIMAX(train, order=(p, d, q), freq='MS',
initialization='approximate_diffuse')
results = model.fit()
# Predict 48 months from end of train set
forecast = results.get_forecast(steps=48)
pred_ci = forecast.conf_int(alpha=.05)
predictions = forecast.predicted_mean
rmse = RMSE(test, predictions)
# pct = error_as_pct(rmse, train[-1], test[-1])
return pred_ci, rmse, (train[-1], test[-1]) # , pct
def SARIMAX_forecast(series, cfg, pred_len):
"""DOCSTRING"""
X = series
    # hold out the last pred_len observations as the test set
    train_size = int(len(X) - pred_len)
train, test = X[0:train_size], X[train_size:]
model = SARIMAX(train, order=cfg[0], seasonal_order=cfg[1],
trend=cfg[2], initialization='approximate_diffuse')
results = model.fit()
    # Predict over the held-out test range
forecast = results.predict(start=test.index[0], end=test.index[-1])
return forecast
# pred_ci = forecast.conf_int(alpha=.05)
# predictions = forecast.predicted_mean
# rmse = RMSE(test, predictions)
# pct = error_as_pct(rmse, train[-1], test[-1])
# ROI = (predictions[-1] - train[-1]) / train[-1]
# #return {'pred_ci': pred_ci, 'rmse': rmse, 'pct_error': pct, 'test': test, 'predictions': predictions, 'series': X}
# return ROI
``` |
{
"source": "jmillerbrooks/pipe-dash",
"score": 3
} |
#### File: jmillerbrooks/pipe-dash/tiger.py
```python
import pandas as pd
import streamlit as st
import re
import pydeck as pdk
import numpy as np
import altair as alt
applicants = pd.read_csv('./applicants.csv')
grants = pd.read_csv('./grants.csv')
# lat_midpoint = grants['lat'].median()
# lon_midpoint = grants['lon'].median()
min_grant, max_grant, med_grant = int(grants.Amount.min()), int(grants.Amount.max()), int(grants.Amount.median())
min_app, max_app = int(applicants['Funding Request'].min()), int(applicants['Funding Request'].max())
app_25, app_75 = int(applicants['Funding Request'].quantile(.25)), int(applicants['Funding Request'].quantile(.75))
st.set_page_config(layout='wide')
app_or_grant = st.selectbox(
'Show Applications or Grants?',
('Applications', 'Grants'))
if app_or_grant == 'Applications':
st.title('TIGER Applications')
st.subheader('Applicants')
applicant_entities = list(applicants.State.unique())
def entity_select():
return st.multiselect('Show Applications From:', options=applicant_entities, default=applicant_entities[0])
def slider_select(min_v, max_v, range_v):
return st.slider('Select a Range of Values', min_v, max_v, range_v)
def show_select():
return st.selectbox('Show Sum of Total:', options=['Funding Request', 'Project Cost'])
with st.sidebar:
entity_list = entity_select()
slider = slider_select(0, 3500, (0, 250))
# grant_range = st.slider('Select a Range of Values',\
# min_app, max_app, (app_25, app_75))
filtered = applicants[applicants.State.isin(entity_list)]
st.write(f'There are {len(filtered)} applications from the State(s) you selected. This represents {round(100*len(filtered)/len(applicants), 2)} percent of all applications.')
left_column, right_column = st.beta_columns((1, 2))
with left_column:
show_variable = show_select()
hist_values = filtered.groupby(['State', 'Round']).agg('sum')[show_variable].reset_index()
st.write(hist_values)
with right_column:
alt_chart = alt.Chart(hist_values).\
mark_bar().encode(
x='State',
y=show_variable,
color='State',
column='Round:O'
)
st.subheader(f'Total {show_variable} by Year')
st.altair_chart(alt_chart)
with st.beta_expander('Raw Data'):
st.write(applicants[applicants.State.isin(entity_list)])
# st.bar_chart(data=filtered['Applicant Name'].value_counts())
# st.map(filtered)
elif app_or_grant == 'Grants':
st.title('TIGER Grants Awarded')
min_grant_size = st.slider('Minimum Grant Size', min_grant, max_grant, med_grant, step=int((max_grant - min_grant)/100))
n_grants = len(grants[grants.Amount >= min_grant_size])
prop_grants = round((1 - (n_grants/len(grants))) * 100, 2)
st.write(f'{n_grants} grants awarded in amounts of at least {min_grant_size}. {prop_grants} percent of all grants awarded were less than {min_grant_size}.')
st.subheader('Grants Awarded Map (Guam Excluded)')
st.map(grants[(grants.lon < 0) & (grants.Amount >= min_grant_size)])
with st.beta_expander('Raw Data'):
st.write(grants)
# st.map(applicants[(grants.lon < 0) & (grants.Amount >= min_grant_size)])
# st.pydeck_chart(pdk.Deck(
# map_style='mapbox://styles/mapbox/light-v9',
# layers=[
# pdk.Layer(
# 'HexagonLayer',
# data=grants,
# get_position='[lon, lat]',
# radius=25000,
# elevation_scale=5000,
# elevation_range=[0, 1000],
# pickable=True,
# extruded=True,
# ),
# pdk.Layer(
# 'ScatterplotLayer',
# data=grants,
# get_position='[lon, lat]',
# get_color='[200, 30, 0, 160]',
# get_radius=200,
# ),
# ],
# ))
``` |
{
"source": "jmillerbrooks/scratch_models",
"score": 3
} |
#### File: jmillerbrooks/scratch_models/viz.py
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from linear_models import RidgeRegScratch
def make_x_y(deg=2):
X = np.array([*range(-100, 100)]).reshape(-1, 1) / 100
# rands = np.random.uniform(low=-0.05, high=0.05, size=X.shape[0]).reshape(-1,1)
# X += rands
poly_adder = PolynomialFeatures(degree=deg)
X = poly_adder.fit_transform(X)
thetas = np.array(np.random.randn(deg+1, 1)).reshape(-1, 1)
# return thetas
y = X.dot(thetas)
y += np.random.normal(loc=0, scale=.1, size=(len(y), 1))
return X, y
def plot_alphas(X, y, alphas=[0.0001, 10, 1000000000000], show_degree=1):
fig, (ax, ax1, ax2) = plt.subplots(1, len(alphas), figsize=(20, 10))
for alpha_, ax_ in zip(alphas, [ax, ax1, ax2]):
model = RidgeRegScratch(alpha=alpha_)
model.fit(X, y)
# uncomment the below line to show the predicted coefficients for each iteration of alpha
# note, that the coefficient theta_0 remains very similar, while all other coefficients
# get progressively smaller as alpha grows larger
# print(f'thetas for alpha = {alpha_}: {model.thetas.T}')
predictions = model.predict(X)
ax_.scatter(X[:, show_degree], y)
ax_.plot(X[:, show_degree], predictions, color='red')
ax_.set_title(f'Alpha = {alpha_}')
ax_.set_ylabel('y')
ax_.set_xlabel(f'X degree {show_degree}')
fig.suptitle(
'Ridge Regression model fits for different tuning parameters alpha', size=20)
fig.show()
return fig
X, y = make_x_y(deg=9)
fig_alphas = plot_alphas(X, y, show_degree=1)
``` |
{
"source": "jmillerca/opencanary_web",
"score": 2
} |
#### File: opencanary_web/handlers/whiteiplist.py
```python
import tornado
from handlers.base import BaseHandler
from util.auth import jwtauth
from service.whiteipservice import whiteips
# from dbs.dal.LogOperate import LogOp
import datetime
import json
@jwtauth
class WhiteiplistHandler(BaseHandler):
""" 获取白名单ip列表 """
def get(self):
res = ','.join(whiteips())
# json.dumps(line_res)
self.write(res)
``` |
{
"source": "jmillerkoren/Password-Manager",
"score": 2
} |
#### File: backend/pwmanager/serializers.py
```python
import uuid
from .models import VaultUser, Vault
from rest_framework import serializers
from django.contrib.auth import authenticate
from django.contrib.auth.hashers import make_password
class VaultUserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = VaultUser
fields = ['auth_key']
class RegistrationSerializer(serializers.HyperlinkedModelSerializer):
token = serializers.CharField(max_length=255, read_only=True)
auth_key = serializers.CharField(max_length=255)
email = serializers.CharField(max_length=255)
class Meta:
model = VaultUser
fields = ['auth_key', 'token', 'email']
def create(self, validated_data):
return VaultUser.objects.create_user(**validated_data)
class LoginSerializer(serializers.HyperlinkedModelSerializer):
token = serializers.CharField(max_length=255, read_only=True)
auth_key = serializers.CharField(max_length=255)
email = serializers.CharField(max_length=255)
class Meta:
model = VaultUser
fields = ['auth_key', 'token', 'email']
def validate(self, data):
auth_key = data.get('auth_key', None)
email = data.get('email', None)
if auth_key is None:
raise serializers.ValidationError('Auth token is required to login')
user = authenticate(email, auth_key=auth_key, email=email)
if user is None:
raise serializers.ValidationError('A user with provided credentials does not exist')
return {'token': user.token}
class VaultSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.UUIDField(required=False)
username = serializers.CharField(max_length=255)
password = serializers.CharField(max_length=255)
domain = serializers.CharField(max_length=255)
class Meta:
model = Vault
fields = ['username', 'password', 'domain', 'id']
def validate(self, data):
username = data.get('username', None)
if username is None:
raise serializers.ValidationError('Username must be provided')
password = data.get('password', None)
if password is None:
raise serializers.ValidationError('Password must be provided')
domain = data.get('domain', None)
if domain is None:
raise serializers.ValidationError('Domain must be provided')
return {
'username': username,
'password': password,
'domain': domain
}
def create(self, validated_data):
user = VaultUser.objects.get(pk=self.context.get('user_id'))
if user is not None:
vault_item = Vault(id=uuid.uuid4(), domain=validated_data['domain'], username=validated_data['username'],
password=validated_data['password'], vault_user=user)
vault_item.save()
return vault_item
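# Hedged view-side usage sketch (names are illustrative, not from this app):
# serializer = VaultSerializer(data=request.data,
#                              context={'user_id': request.user.pk})
# serializer.is_valid(raise_exception=True)
# vault_item = serializer.save()  # dispatches to create() above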
``` |
{
"source": "jmilliaan/mppi_iot",
"score": 2
} |
#### File: mppi_iot/fast/video_raspi.py
```python
import cv2
import matplotlib.pyplot as plt
import time
from picamera.array import PiRGBArray as pi_rgb
from picamera import PiCamera as picam
confidence_threshold = 0.45 # Threshold to detect object
font = cv2.FONT_HERSHEY_COMPLEX
color = [255, 255, 255]
height = 320
width = 640
focal_length = 500
class PiCam:
def __init__(self):
self.cam = picam()
self.cam.resolution = (width, height)
self.cam.framerate = 30
        # PiRGBArray needs the camera *instance*, not the PiCamera class
        self.raw_cap = pi_rgb(self.cam, size=self.cam.resolution)
time.sleep(0.1)
def focal_length(measured_distance, real_width, width_in_rf_image):
foc_length = (width_in_rf_image * measured_distance) / real_width
return foc_length
def distance_finder(foc_len, real_face_width, face_width_in_frame):
distance = (real_face_width * foc_len) / face_width_in_frame
return distance
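# Worked example of the pinhole-camera relations above (numbers illustrative):
# a 14 cm wide face seen 50 cm away at 250 px gives
# focal_length(50, 14, 250) ~= 892.9; the same face seen at 125 px then yields
# distance_finder(892.9, 14, 125) ~= 100 cm.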
camera = PiCam()
classFile = 'coco.names'
with open(classFile, 'rt') as f:
classNames = f.read().rstrip('\n').split('\n')
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
for frame in camera.cam.capture_continuous(camera.raw_cap, format="bgr", use_video_port=True):
img = frame.array
class_ids, confidences, boundary_boxes = net.detect(img, confThreshold=confidence_threshold)
if len(class_ids) != 0:
for classId, confidence, box in zip(class_ids.flatten(), confidences.flatten(), boundary_boxes):
cv2.rectangle(img, box, color=color, thickness=2)
cv2.putText(img, classNames[classId - 1].upper(), (box[0] + 10, box[1] + 30), font, 1, color, 2)
cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30), font, 1, color, 2)
cv2.imshow("IEE3061 IoT", img)
camera.raw_cap.truncate(0)
key = cv2.waitKey(1)
if key == 27:
break
cv2.destroyAllWindows()
```
#### File: jmilliaan/mppi_iot/mppi_frame.py
```python
import cv2
from scipy.ndimage.filters import gaussian_filter, convolve
class Frame:
def __init__(self, frame):
self.raw_frame = frame
self.bw = cv2.cvtColor(self.raw_frame, cv2.COLOR_BGR2GRAY)
self.canny_list = [[]]
def blur(self, sigma_val):
blurred = gaussian_filter(self.bw, sigma=sigma_val)
return blurred
def canny(self, minval, maxval, sigma_val):
cannydetector = cv2.Canny(self.blur(sigma_val), minval, maxval)
return cannydetector
    def get_area_magnitude(self, bigmat, xloc, yloc, dim):
        """Count non-edge (zero) pixels in the (2*dim+1)-square window around (xloc, yloc)."""
        size = int(dim * 2 + 1)
        x_loc_relative = xloc - dim
        y_loc_relative = yloc - dim
        mag = 0
        try:
            for x in range(size):
                for y in range(size):
                    # edge pixels are truthy, so `not` counts the empty ones
                    mag += not bigmat[y + y_loc_relative - dim][x + x_loc_relative - dim]
        except IndexError:
            # window ran off the image; out-of-bounds pixels are skipped
            pass
        return mag
    def reverse_knn_joy(self, img, dim):
        """Return (x, y) points whose neighborhood is at least ~5% edge pixels."""
        bad_list = []
y_size = len(img)
empty = (dim * 2 + 1) ** 2
full_threshold = empty // 20
for x in range(dim, x_size - dim):
for y in range(dim, y_size - dim):
current_mag = empty - self.get_area_magnitude(img, x, y, dim)
if current_mag >= full_threshold:
bad_list.append((x, y))
return bad_list
``` |
{
"source": "jmillikin/rules_javascript",
"score": 2
} |
#### File: rules_javascript/javascript/javascript.bzl
```python
load(
"//javascript/internal:providers.bzl",
_JavaScriptInfo = "JavaScriptInfo",
_JavaScriptModuleInfo = "JavaScriptModuleInfo",
)
load(
"//javascript/node:node.bzl",
_node_common = "node_common",
_node_register_toolchains = "node_register_toolchains",
)
load(
"//tools/babel:babel.bzl",
_babel_common = "babel_common",
_babel_register_toolchains = "babel_register_toolchains",
)
load(
"//tools/eslint:eslint.bzl",
_eslint_register_toolchains = "eslint_register_toolchains",
)
load(
"//tools/webpack:webpack.bzl",
_webpack_common = "webpack_common",
_webpack_register_toolchains = "webpack_register_toolchains",
)
load(
"//tools/yarn:yarn.bzl",
_yarn_register_toolchains = "yarn_register_toolchains",
)
load(
"//typescript:typescript.bzl",
_typescript_register_toolchains = "typescript_register_toolchains",
)
def _version(kwargs, prefix):
    # map e.g. "node_version" to {"version": ...} for that toolchain's kwargs
    key = prefix + "_version"
    if key in kwargs:
        return {"version": kwargs[key]}
    return {}
def javascript_register_toolchains(**kwargs):
toolchains = dict(
babel = _babel_register_toolchains,
eslint = _eslint_register_toolchains,
node = _node_register_toolchains,
webpack = _webpack_register_toolchains,
typescript = _typescript_register_toolchains,
yarn = _yarn_register_toolchains,
)
for (kwarg_prefix, register) in toolchains.items():
register_kwargs = {}
for key, value in kwargs.items():
if key.startswith(kwarg_prefix + "_"):
register_kwargs[key[len(kwarg_prefix) + 1:]] = value
register(**register_kwargs)
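# Hedged WORKSPACE sketch (version strings are illustrative):
#
# javascript_register_toolchains(
#     node_version = "10.15.0",
#     yarn_version = "1.13.0",
# )
#
# Keyword arguments are routed by prefix: "node_version" reaches
# node_register_toolchains as version = "10.15.0", and so on.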
# region Build Rules {{{
def _module_name(ctx, src):
# TODO: adjust 'module_prefix' based on {strip_,}import_prefix
return src.short_path[:-len(".js")]
def _js_library(ctx):
direct_sources = []
direct_modules = []
if ctx.attr.src:
direct_sources = depset(direct = ctx.files.src)
direct_modules.append(_JavaScriptModuleInfo(
name = _module_name(ctx, ctx.file.src),
files = direct_sources,
source = struct(
path = ctx.file.src.path,
short_path = ctx.file.src.short_path,
),
))
deps = [dep[_JavaScriptInfo] for dep in ctx.attr.deps]
return _JavaScriptInfo(
direct_modules = direct_modules,
direct_sources = direct_sources,
transitive_sources = depset(
direct = ctx.files.src,
transitive = [dep.transitive_sources for dep in deps],
),
transitive_modules = depset(
direct = direct_modules,
transitive = [dep.transitive_modules for dep in deps],
),
)
js_library = rule(
_js_library,
attrs = {
"src": attr.label(
allow_single_file = [".js"],
),
"deps": attr.label_list(
providers = [_JavaScriptInfo],
),
"import_prefix": attr.string(),
"strip_import_prefix": attr.string(),
},
provides = [_JavaScriptInfo],
)
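# Hedged BUILD example (target and file names are illustrative):
#
# js_library(
#     name = "util",
#     src = "util.js",
# )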
def _js_binary_babel(ctx, dep_modules):
babel_toolchain = ctx.attr._babel_toolchain[_babel_common.ToolchainInfo]
babel_config_file = ctx.actions.declare_file("_babel/{}/config.js".format(
ctx.attr.name,
))
preset_env = _babel_common.preset(
babel_toolchain.babel_modules["@babel/preset-env"],
{"targets": {"node": "current"}},
)
babel_config = _babel_common.create_config(
ctx.actions,
babel_toolchain = babel_toolchain,
output_file = babel_config_file,
presets = [preset_env],
)
babel_modules = []
for dep_module in dep_modules:
babel_out = ctx.actions.declare_file("_babel_out/{}/{}.js".format(
ctx.attr.name,
dep_module.name,
))
babel_modules.append(_JavaScriptModuleInfo(
name = dep_module.name,
files = depset(direct = [babel_out]),
source = struct(
path = babel_out.path,
short_path = babel_out.short_path,
),
))
_babel_common.compile(
ctx.actions,
babel_toolchain = babel_toolchain,
babel_config = babel_config,
module = dep_module,
output_file = babel_out,
babel_arguments = ctx.attr.babel_options,
)
babel_out = ctx.actions.declare_file("_babel_out/{}/{}".format(
ctx.attr.name,
ctx.file.src.short_path,
))
main_babel_out = babel_out
_babel_common.compile(
ctx.actions,
babel_toolchain = babel_toolchain,
babel_config = babel_config,
module = _JavaScriptModuleInfo(
files = depset(direct = ctx.files.src),
source = ctx.file.src,
),
output_file = babel_out,
babel_arguments = ctx.attr.babel_options,
)
return struct(
main_js = main_babel_out,
modules = babel_modules,
)
_JS_BINARY_WEBPACK_CONFIG = """
const path = require("path");
const webpack = require(path.resolve(process.cwd(), CONFIG.webpack));
let resolve_aliases = {};
CONFIG.resolve_aliases.forEach(item => {
resolve_aliases[item[0]] = path.resolve(process.cwd(), item[1]);
});
module.exports = {
mode: "production",
target: "node",
plugins: [new webpack.BannerPlugin({
banner: "/*! NODE_EXECUTABLE */",
raw: true,
entryOnly: true,
})],
output: { path: process.cwd() },
resolve: { alias: resolve_aliases },
};
"""
def _js_binary_webpack(ctx, babel_out):
webpack_toolchain = ctx.attr._webpack_toolchain[_webpack_common.ToolchainInfo]
webpack_config_file = ctx.actions.declare_file("_webpack/{}/config.js".format(ctx.attr.name))
ctx.actions.write(
webpack_config_file,
"const CONFIG = {};".format(struct(
webpack = webpack_toolchain.webpack_modules["webpack"].source.path,
resolve_aliases = [[mod.name, mod.source.path] for mod in babel_out.modules],
).to_json()) + _JS_BINARY_WEBPACK_CONFIG,
)
webpack_config = _webpack_common.WebpackConfigInfo(
webpack_config_file = webpack_config_file,
files = depset(
direct = [webpack_config_file],
transitive = [mod.files for mod in babel_out.modules],
),
)
webpack_out = ctx.actions.declare_file("_webpack_out/{}/bundle.js".format(ctx.attr.name))
_webpack_common.bundle(
ctx.actions,
webpack_toolchain = webpack_toolchain,
webpack_config = webpack_config,
entries = [babel_out.main_js],
output_file = webpack_out,
webpack_arguments = ctx.attr.webpack_options,
)
return struct(
bundle_js = webpack_out,
)
def _js_binary(ctx):
node_toolchain = ctx.attr._node_toolchain[_node_common.ToolchainInfo]
dep_modules = depset(
transitive = [
dep[_JavaScriptInfo].transitive_modules
for dep in ctx.attr.deps
],
)
babel_out = _js_binary_babel(ctx, dep_modules)
webpack_out = _js_binary_webpack(ctx, babel_out)
out_plain = ctx.actions.declare_file(ctx.attr.name)
out_exec = ctx.actions.declare_file(ctx.attr.name + ".hermetic.js")
ctx.actions.expand_template(
template = webpack_out.bundle_js,
output = out_plain,
substitutions = {
"/*! NODE_EXECUTABLE */": "#!/usr/bin/env node\n",
},
is_executable = True,
)
ctx.actions.expand_template(
template = webpack_out.bundle_js,
output = out_exec,
substitutions = {
"/*! NODE_EXECUTABLE */": "#!{}\n".format(
node_toolchain.node_executable.path,
),
},
is_executable = True,
)
return DefaultInfo(
files = depset(direct = [out_plain]),
executable = out_exec,
runfiles = ctx.runfiles(
files = ctx.files.src,
transitive_files = node_toolchain.files,
),
)
js_binary = rule(
_js_binary,
executable = True,
attrs = {
"src": attr.label(
allow_single_file = [".js"],
mandatory = True,
),
"deps": attr.label_list(
providers = [_JavaScriptInfo],
),
"babel_options": attr.string_list(),
"webpack_options": attr.string_list(),
"_node_toolchain": attr.label(
default = "//javascript/node:toolchain",
),
"_babel_toolchain": attr.label(
default = "//tools/babel:toolchain",
),
"_webpack_toolchain": attr.label(
default = "//tools/webpack:toolchain",
),
},
)
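# Hedged BUILD example (target and file names are illustrative):
#
# js_binary(
#     name = "app",
#     src = "main.js",
#     deps = [":util"],
# )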
# endregion }}}
```
#### File: tools/eslint/eslint.bzl
```python
load(
"//javascript/node:node.bzl",
_node_common = "node_common",
)
load(
"//tools/yarn/internal:yarn_vendor.bzl",
_yarn_vendor_modules = "yarn_vendor_modules",
)
load(
"//tools/eslint/internal:toolchain.bzl",
_TOOLCHAIN_TYPE = "TOOLCHAIN_TYPE",
_ToolchainInfo = "EslintToolchainInfo",
)
# region Versions {{{
_LATEST = "5.13.0"
_VERSIONS = ["5.13.0"]
def _check_version(version):
if version not in _VERSIONS:
fail("ESLint version {} not supported by rules_javascript".format(repr(version)))
# endregion }}}
eslint_common = struct(
VERSIONS = _VERSIONS,
ToolchainInfo = _ToolchainInfo,
TOOLCHAIN_TYPE = _TOOLCHAIN_TYPE,
)
def eslint_register_toolchains(version = _LATEST):
_check_version(version)
repo_name = "eslint_v{}".format(version)
if repo_name not in native.existing_rules().keys():
eslint_repository(
name = repo_name,
version = version,
)
native.register_toolchains("@rules_javascript//tools/eslint/toolchains:v{}".format(version))
# region Repository Rules {{{
def _eslint_repository(ctx):
version = ctx.attr.version
_check_version(version)
vendor_dir = "@rules_javascript//tools/eslint/internal:eslint_v" + version
_yarn_vendor_modules(ctx, vendor_dir, tools = {
"eslint": "eslint/bin/eslint.js",
})
eslint_repository = repository_rule(
_eslint_repository,
attrs = {
"version": attr.string(mandatory = True),
"registries": attr.string_list(
default = _node_common.NPM_REGISTRIES,
),
},
)
# endregion }}}
```
#### File: yarn/internal/toolchain.bzl
```python
load(
"//javascript/node:node.bzl",
_node_common = "node_common",
)
TOOLCHAIN_TYPE = "@rules_javascript//tools/yarn:toolchain_type"
YarnToolchainInfo = provider(fields = ["files", "vars", "yarn_executable"])
def _yarn_toolchain_info(ctx):
node_toolchain = ctx.attr._node_toolchain[_node_common.ToolchainInfo]
runfiles = ctx.attr.yarn[DefaultInfo].default_runfiles.files
toolchain = YarnToolchainInfo(
yarn_executable = ctx.executable.yarn,
files = depset(
direct = [ctx.executable.yarn],
transitive = [
runfiles,
node_toolchain.files,
],
),
vars = {"YARN": ctx.executable.yarn.path},
)
return [
platform_common.ToolchainInfo(yarn_toolchain = toolchain),
platform_common.TemplateVariableInfo(toolchain.vars),
]
yarn_toolchain_info = rule(
_yarn_toolchain_info,
attrs = {
"yarn": attr.label(
mandatory = True,
executable = True,
cfg = "host",
),
"_node_toolchain": attr.label(
default = "//javascript/node:toolchain",
),
},
provides = [
platform_common.ToolchainInfo,
platform_common.TemplateVariableInfo,
],
)
def _yarn_toolchain_alias(ctx):
toolchain = ctx.toolchains[TOOLCHAIN_TYPE].yarn_toolchain
return [
DefaultInfo(files = toolchain.files),
toolchain,
platform_common.TemplateVariableInfo(toolchain.vars),
]
yarn_toolchain_alias = rule(
_yarn_toolchain_alias,
toolchains = [TOOLCHAIN_TYPE],
provides = [
DefaultInfo,
YarnToolchainInfo,
platform_common.TemplateVariableInfo,
],
)
``` |
{
"source": "jmillxyz/fleece",
"score": 3
} |
#### File: fleece/cli/main.py
```python
import sys
import pkg_resources
commands = ['build', 'run', 'config']
def print_help():
print('Available sub-commands: {}.'.format(', '.join(commands)))
print('Use "fleece <sub-command> --help" for usage.')
def main():
if len(sys.argv) == 1:
print_help()
sys.exit(0)
if sys.argv[1] in commands:
# Check that the CLI dependencies are installed before executing the
# command.
deps = pkg_resources.get_distribution('fleece')._dep_map.get(
'cli', [])
for dep in deps:
try:
# PyYAML really messes this up.
if dep.project_name == 'PyYAML':
__import__('yaml')
else:
__import__(dep.project_name)
except ImportError:
print('Dependency "{}" is not installed. Did you run '
'"pip install fleece[cli]"?'.format(dep))
sys.exit(1)
# execute the command
module = __import__('fleece.cli.' + sys.argv[1])
module = getattr(module.cli, sys.argv[1])
getattr(module, 'main')(sys.argv[2:])
else:
if sys.argv[1] not in ['--help', '-h']:
print('"{}" is not an available fleece sub-command.'.format(
sys.argv[1]))
print_help()
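# Example dispatch (hypothetical session): `fleece run --help` imports
# fleece.cli.run and calls run.main(['--help']).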
```
#### File: fleece/tests/test_cli_config.py
```python
import six
import base64
import json
import os
import re
import sys
import unittest
import yaml
from fleece.cli.config import config
if six.PY2:
import mock
from StringIO import StringIO
# fullmatch is not available on PY2
def fullmatch(pattern, text, *args, **kwargs):
match = re.match(pattern, text, *args, **kwargs)
return match if match.group(0) == text else None
re.fullmatch = fullmatch
else:
from unittest import mock
from io import StringIO
TEST_CONFIG = '.config.tmp'
test_yaml_config = '''stages:
/.*/:
environment: dev
key: dev-key
prod:
environment: prod
key: prod-key
config:
foo: bar
password:
+dev: :encrypt:dev-password
+prod: :encrypt:prod-password
+foo: :encrypt:foo-password
+/ba.*/: :encrypt:bar-password
'''
test_json_config = '''{
"stages": {
"/.*/": {
"environment": "dev",
"key": "dev-key"
},
"prod": {
"environment": "prod",
"key": "prod-key"
}
},
"config": {
"foo": "bar",
"password": {
"+dev": ":encrypt:dev-password",
"+prod": ":encrypt:prod-password"
}
}
}
'''
test_config_file = '''stages:
/.*/:
environment: dev
key: dev-key
prod:
environment: prod
key: prod-key
config:
foo: bar
password:
+dev: :decrypt:ZGV2OmRldi1wYXNzd29yZA==
+prod: :decrypt:cHJvZDpwcm9kLXBhc3N3b3Jk
+foo: :decrypt:Zm9vOmZvby1wYXNzd29yZA==
+/ba.*/: :decrypt:L2JhLiovOmJhci1wYXNzd29yZA=='''
test_environments = {
'environments': [
{'name': 'dev', 'account': '1234567890'},
{'name': 'prod', 'account': '0987654321'}
]
}
def mock_encrypt(text, stage):
return base64.b64encode('{}:{}'.format(stage, text).encode(
'utf-8')).decode('utf-8')
def mock_decrypt(text, stage):
s, d = base64.b64decode(text.encode('utf-8')).decode('utf-8').split(':', 1)
stage = stage.split(':')[-1]
if s != stage and not re.fullmatch(s.split('/')[1], stage):
raise RuntimeError('wrong stage:' + s + ':' + stage)
return d
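# Round trip of the mocks above: mock_encrypt('pw', 'dev') base64-encodes
# 'dev:pw' to 'ZGV2OnB3', and mock_decrypt('ZGV2OnB3', 'dev') returns 'pw'.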
@mock.patch('fleece.cli.config.config._encrypt_text', new=mock_encrypt)
@mock.patch('fleece.cli.config.config._decrypt_text', new=mock_decrypt)
@mock.patch('fleece.cli.run.run.get_config', return_value=test_environments)
class TestCLIConfig(unittest.TestCase):
def tearDown(self):
if os.path.exists(TEST_CONFIG):
os.unlink(TEST_CONFIG)
def test_import_yaml_config(self, *args):
stdin = sys.stdin
sys.stdin = StringIO(test_yaml_config)
config.main(['-c', TEST_CONFIG, 'import'])
sys.stdin = stdin
with open(TEST_CONFIG, 'rt') as f:
data = yaml.load(f.read())
self.assertEqual(data, {
'stages': {
'/.*/': {'environment': 'dev', 'key': 'dev-key'},
'prod': {'environment': 'prod', 'key': 'prod-key'}
},
'config': {
'foo': 'bar',
'password': {
'+dev': ':decrypt:ZGV2OmRldi1wYXNzd29yZA==',
'+prod': ':decrypt:cHJvZDpwcm9kLXBhc3N3b3Jk',
'+foo': ':decrypt:Zm9vOmZvby1wYXNzd29yZA==',
'+/ba.*/': ':decrypt:L2JhLiovOmJhci1wYXNzd29yZA=='
}
}
})
def test_import_json_config(self, *args):
stdin = sys.stdin
sys.stdin = StringIO(test_json_config)
config.main(['-c', TEST_CONFIG, 'import'])
sys.stdin = stdin
with open(TEST_CONFIG, 'rt') as f:
data = yaml.load(f.read())
self.assertEqual(data, {
'stages': {
'/.*/': {'environment': 'dev', 'key': 'dev-key'},
'prod': {'environment': 'prod', 'key': 'prod-key'}
},
'config': {
'foo': 'bar',
'password': {
'+dev': ':decrypt:ZGV2OmRldi1wYXNzd29yZA==',
'+prod': ':decrypt:cHJvZDpwcm9kLXBhc3N3b3Jk'
}
}
})
def test_export_yaml_config(self, *args):
stdout = sys.stdout
sys.stdout = StringIO()
with open(TEST_CONFIG, 'wt') as f:
f.write(test_config_file)
config.main(['-c', TEST_CONFIG, 'export'])
sys.stdout.seek(0)
data = sys.stdout.read()
sys.stdout = stdout
self.assertEqual(yaml.load(data), {
'stages': {
'/.*/': {'environment': 'dev', 'key': 'dev-key'},
'prod': {'environment': 'prod', 'key': 'prod-key'}
},
'config': {
'foo': 'bar',
'password': {
'+dev': ':encrypt:dev-password',
'+prod': ':encrypt:prod-password',
'+foo': ':encrypt:foo-password',
'+/ba.*/': ':encrypt:bar-password'
}
}
})
def test_export_json_config(self, *args):
stdout = sys.stdout
sys.stdout = StringIO()
with open(TEST_CONFIG, 'wt') as f:
f.write(test_config_file)
config.main(['-c', TEST_CONFIG, 'export', '--json'])
sys.stdout.seek(0)
data = sys.stdout.read()
sys.stdout = stdout
self.assertEqual(json.loads(data), {
'stages': {
'/.*/': {'environment': 'dev', 'key': 'dev-key'},
'prod': {'environment': 'prod', 'key': 'prod-key'}
},
'config': {
'foo': 'bar',
'password': {
'+dev': ':encrypt:dev-password',
'+prod': ':encrypt:prod-password',
'+foo': ':encrypt:foo-password',
'+/ba.*/': ':encrypt:bar-password'
}
}
})
def test_render_yaml_config(self, *args):
stdout = sys.stdout
sys.stdout = StringIO()
with open(TEST_CONFIG, 'wt') as f:
f.write(test_config_file)
config.main(['-c', TEST_CONFIG, 'render', 'dev'])
sys.stdout.seek(0)
data = sys.stdout.read()
sys.stdout = stdout
self.assertEqual(yaml.load(data), {
'foo': 'bar',
'password': '<PASSWORD>'
})
def test_render_yaml_config_custom(self, *args):
stdout = sys.stdout
sys.stdout = StringIO()
with open(TEST_CONFIG, 'wt') as f:
f.write(test_config_file)
config.main(['-c', TEST_CONFIG, 'render', 'foo'])
sys.stdout.seek(0)
data = sys.stdout.read()
sys.stdout = stdout
self.assertEqual(yaml.load(data), {
'foo': 'bar',
'password': '<PASSWORD>'
})
def test_render_yaml_config_custom_regex(self, *args):
stdout = sys.stdout
sys.stdout = StringIO()
with open(TEST_CONFIG, 'wt') as f:
f.write(test_config_file)
config.main(['-c', TEST_CONFIG, 'render', 'baz'])
sys.stdout.seek(0)
data = sys.stdout.read()
sys.stdout = stdout
self.assertEqual(yaml.load(data), {
'foo': 'bar',
'password': '<PASSWORD>'
})
def test_render_json_config(self, *args):
stdout = sys.stdout
sys.stdout = StringIO()
with open(TEST_CONFIG, 'wt') as f:
f.write(test_config_file)
config.main(['-c', TEST_CONFIG, 'render', 'prod', '--json'])
sys.stdout.seek(0)
data = sys.stdout.read()
sys.stdout = stdout
self.assertEqual(json.loads(data), {
'foo': 'bar',
'password': '<PASSWORD>'
})
def test_render_encrypted_config(self, *args):
stdout = sys.stdout
sys.stdout = StringIO()
with open(TEST_CONFIG, 'wt') as f:
f.write(test_config_file)
config.main(['-c', TEST_CONFIG, 'render', 'prod', '--encrypt'])
sys.stdout.seek(0)
data = sys.stdout.read()
sys.stdout = stdout
self.assertEqual(
json.loads(mock_decrypt(json.loads(data)[0], 'prod')), {
'foo': 'bar',
'password': '<PASSWORD>'
})
def test_render_python_config(self, *args):
stdout = sys.stdout
sys.stdout = StringIO()
with open(TEST_CONFIG, 'wt') as f:
f.write(test_config_file)
config.main(['-c', TEST_CONFIG, 'render', 'prod', '--python'])
sys.stdout.seek(0)
data = sys.stdout.read()
sys.stdout = stdout
g = {'ENCRYPTED_CONFIG': None}
exec(data.split('\n')[0], g)
data = mock_decrypt(g['ENCRYPTED_CONFIG'][0], 'prod')
self.assertEqual(json.loads(data), {
'foo': 'bar',
'password': '<PASSWORD>'
})
```
#### File: fleece/tests/test_httperror.py
```python
import unittest
from fleece import httperror
class HTTPErrorTests(unittest.TestCase):
"""Tests for :class:`fleece.httperror.HTTPError`."""
def test_error_msg_format(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404)
self.assertEqual('404: Not Found', str(err.exception))
def test_error_msg_format_custom_message(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404, message='Nothing Here')
self.assertEqual(
'404: Not Found - Nothing Here', str(err.exception)
)
```
#### File: fleece/tests/test_log.py
```python
import logging
import mock
import unittest
import uuid
from fleece.log import setup_root_logger, get_logger, RetryHandler
setup_root_logger()
class TestLogHandler(logging.Handler):
def __init__(self, fail=1):
super(TestLogHandler, self).__init__()
self.fail = fail
self.attempt = 0
self.log = []
def emit(self, record):
self.attempt += 1
if self.attempt <= self.fail:
raise RuntimeError(str(self.attempt))
record.msg = record.msg.replace('foo', 'foo-' + str(self.attempt))
self.log.append(record)
class LogTests(unittest.TestCase):
def setUp(self):
self.logger = get_logger(uuid.uuid4().hex)
def test_retry_handler_with_retries(self):
h = TestLogHandler(fail=2)
self.logger.addHandler(RetryHandler(h, max_retries=5))
self.logger.error('foo')
self.assertEqual(len(h.log), 1)
self.assertIn('event": "foo-3"', h.log[0].getMessage())
@mock.patch('fleece.log.time.sleep')
@mock.patch('fleece.log.random', return_value=1)
def test_retry_handler_with_max_retries(self, mock_random, mock_sleep):
h = TestLogHandler(fail=3)
self.logger.addHandler(RetryHandler(h, max_retries=2))
self.logger.error('foo')
self.assertEqual(h.log, [])
self.assertEqual(mock_sleep.call_count, 2)
self.assertEqual(mock_sleep.call_args_list[0], mock.call(0.1))
self.assertEqual(mock_sleep.call_args_list[1], mock.call(0.2))
def test_retry_handler_with_max_retries_and_raise(self):
h = TestLogHandler(fail=3)
self.logger.addHandler(RetryHandler(h, max_retries=2,
ignore_errors=False))
with self.assertRaises(RuntimeError) as r:
self.logger.error('foo')
self.assertEqual(str(r.exception), '1')
self.assertEqual(h.log, [])
def test_retry_handler_no_retries(self):
h = TestLogHandler(fail=1)
self.logger.addHandler(RetryHandler(h, max_retries=0))
self.logger.error('foo')
self.assertEqual(h.log, [])
def test_retry_handler_no_retries_and_raise(self):
h = TestLogHandler(fail=1)
self.logger.addHandler(RetryHandler(h, max_retries=0,
ignore_errors=False))
with self.assertRaises(RuntimeError) as r:
self.logger.error('foo')
self.assertEqual(str(r.exception), '1')
self.assertEqual(h.log, [])
@mock.patch('fleece.log.time.sleep')
@mock.patch('fleece.log.random', return_value=1)
def test_retry_handler_with_custom_backoff(self, mock_random, mock_sleep):
h = TestLogHandler(fail=4)
self.logger.addHandler(RetryHandler(h, max_retries=4, backoff_base=0.4,
backoff_cap=1.2))
self.logger.error('foo')
self.assertEqual(len(h.log), 1)
self.assertIn('event": "foo-5"', h.log[0].getMessage())
self.assertEqual(mock_sleep.call_count, 4)
self.assertEqual(mock_sleep.call_args_list[0], mock.call(0.4))
self.assertEqual(mock_sleep.call_args_list[1], mock.call(0.8))
self.assertEqual(mock_sleep.call_args_list[2], mock.call(1.2))
self.assertEqual(mock_sleep.call_args_list[3], mock.call(1.2))
```
#### File: fleece/tests/test_raxauth.py
```python
import mock
import unittest
from fleece.httperror import HTTPError
from fleece.raxauth import authenticate
from . import utils
def mock_validation(token):
if token == utils.TEST_TOKEN:
return utils.USER_DATA
else:
raise HTTPError(status=401)
@authenticate()
def authentication_test(token=None, userinfo=None):
return "AUTHENTICATED"
class TestRaxAuth(unittest.TestCase):
@mock.patch('fleece.raxauth.validate', side_effect=mock_validation)
def test_raxauth(self, validation_function):
result = authentication_test(token=utils.TEST_TOKEN, userinfo=None)
self.assertEqual(result, 'AUTHENTICATED')
@mock.patch('fleece.raxauth.validate', side_effect=mock_validation)
def test_unauthorized_empty(self, validation_function):
self.assertRaisesRegexp(HTTPError, '401: Unauthorized',
authentication_test, token='bogus')
``` |
{
"source": "jmillxyz/sunscreen",
"score": 3
} |
#### File: jmillxyz/sunscreen/sunscreen.py
```python
import json
import os
import warnings
from appdirs import user_config_dir
import arrow
from arrow.factory import ArrowParseWarning
import colored
from colored import stylize
import click
import requests
# Suppress warnings about using arrow.get() without a format string
# https://github.com/crsmithdev/arrow/issues/612
warnings.simplefilter("ignore", ArrowParseWarning)
SUN_FACE = "\U0001f31e"
BAR_CHART = "\U0001f4ca"
class UpstreamError(RuntimeError):
pass
class UVForecast:
def __init__(self, epa_resp):
self.today = self._lookup_time()
self.readings = self._interpret(epa_resp)
self.max = self._max()
def _lookup_time(self):
return arrow.utcnow()
def _interpret(self, epa_data):
today = []
for hour in epa_data:
# TODO: map zipcode to timezone for storage later
normalized_datetime = arrow.get(hour["DATE_TIME"], "MMM/DD/YYYY HH A")
order = hour["ORDER"]
uv = hour["UV_VALUE"]
today.append(
{"order": order, "datetime": normalized_datetime, "uv_value": uv}
)
return today
def _max(self):
return max(a["uv_value"] for a in self.readings)
class ConfigFileHandler:
def __init__(self):
self.cfg_dir = user_config_dir(appname="sunscreen", appauthor=False)
os.makedirs(self.cfg_dir, exist_ok=True)
self.cfg_path = os.path.join(self.cfg_dir, "sunscreen.cfg")
def save_zip_to_file(self, zipcode):
config = {"zipcode": zipcode}
with open(self.cfg_path, "w") as f:
json.dump(config, f)
def get_zip_from_file(self):
if os.path.isfile(self.cfg_path):
with open(self.cfg_path) as f:
config = json.load(f)
return config.get("zipcode", None)
else:
return None
def get_local_zip():
    cfg_handler = ConfigFileHandler()
    saved_zipcode = cfg_handler.get_zip_from_file()
    zipcode = click.prompt("Enter US zipcode", default=saved_zipcode, type=str)
    # TODO: ensure zipcode is legit
    if zipcode != saved_zipcode:
        # persist the zipcode whenever it differs from the stored one
        cfg_handler.save_zip_to_file(zipcode)
    return zipcode
def get_todays_uv_data(zipcode):
click.echo("Retrieving today's UV data...")
epa_url = (
"https://iaspub.epa.gov/enviro/efservice/"
f"getEnvirofactsUVHOURLY/ZIP/{zipcode}/json"
)
req = requests.get(epa_url)
if req.status_code != 200:
# couldn't get the stream!
raise UpstreamError
return UVForecast(req.json())
def graph_uv_data(uv_forecast):
# print legend
print("Time ", end="")
max_uv = uv_forecast.max
SPACE = " "
for val in range(1, max_uv + 1):
if val < 3:
print(stylize(SPACE, colored.bg("chartreuse_3b")), end="") # green
elif val < 6:
print(stylize(SPACE, colored.bg("yellow_1")), end="") # yellow
elif val < 8:
print(stylize(SPACE, colored.bg("orange_1")), end="") # orange
elif val < 10:
print(stylize(SPACE, colored.bg("red")), end="") # red
else:
print(stylize(SPACE, colored.bg("purple_1b")), end="") # purple
# UV values header, also adds newline
print(" UV level")
# TODO: use the colors above as background for the chart below
# print each hour's time + UV in chart if there's any UV
for hour in uv_forecast.readings:
uv = hour["uv_value"]
if uv > 0:
print(hour["datetime"].format("HHmm"), end=" ")
print(pad(data="*" * uv, limit=max_uv + 1) + f"{uv}")
def pad(data, limit):
"""Pad a given string `data` by spaces to length `limit`."""
    if len(data) > limit:
        raise ValueError(
            f"length of string {data!r} exceeds the requested limit, {limit}"
        )
return data + (limit - len(data)) * " "
@click.command()
def main():
click.echo(f"Welcome to sunscreen! {SUN_FACE} {BAR_CHART}")
# TODO: add option to specify a new zip code as arg
zipcode = get_local_zip()
try:
uv_data = get_todays_uv_data(zipcode)
graph_uv_data(uv_data)
except UpstreamError:
print("The upstream data source is having connectivity problems!")
except requests.exceptions.ConnectionError:
print("Having trouble connecting, check your network settings.")
``` |
{
"source": "jmilosze/wfrp-hammergen",
"score": 2
} |
#### File: deployment/gcp/deploy.py
```python
import json
import shutil
import sys
from pathlib import Path
import os
import subprocess
import argparse
SCRIPT_DIR = Path(__file__).parent.absolute()
ROOT_DIR = SCRIPT_DIR.parent.parent.absolute()
FRONTEND_DIR = ROOT_DIR / "src" / "frontend"
WEB_DIR = ROOT_DIR / "src"
def build_new_static(env):
    shutil.rmtree("dist", ignore_errors=True)
    script = "build_gcp_prod" if env == "prod" else "build_gcp_staging"
    run_and_output(f"npm run-script {script}")
def deploy_static(env):
    if "PYCHARM_HOSTED" in os.environ:
        del os.environ["PYCHARM_HOSTED"]
    run_and_output(f"firebase deploy --only hosting:{env}")
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"env",
choices=["prod", "staging"],
help="Allowed values: prod, staging.",
)
parser.add_argument(
"part",
choices=["frontend", "api", "all"],
help="Allowed values: frontend, api, all.",
)
return parser.parse_args()
def read_deployment_config(env):
    with open(SCRIPT_DIR / env / "config.json") as f:
return json.loads(f.read())
def run_and_output(command):
output = subprocess.run(command, shell=True, capture_output=True)
print(output.stdout.decode())
print(output.stderr.decode())
def docker_build_and_push(deploy_config):
image_name = deploy_config["image_name"]
ar_registry = deploy_config["ar_registry"]
ar_prefix = deploy_config["project"] + "/" + deploy_config["ar_repository"]
if "PYCHARM_HOSTED" in os.environ:
del os.environ["PYCHARM_HOSTED"]
run_and_output(f"gcloud auth configure-docker {ar_registry} --quiet")
run_and_output(f"docker build -f Dockerfile -t {ar_registry}/{ar_prefix}/{image_name} .")
run_and_output(f"docker push {ar_registry}/{ar_prefix}/{image_name}")
def deploy_to_cloud_run(deploy_config):
service_name = deploy_config["service_name"]
region = deploy_config["region"]
project = deploy_config["project"]
concurrency = deploy_config["concurrency"]
image = "/".join(
[deploy_config["ar_registry"], project, deploy_config["ar_repository"], deploy_config["image_name"]])
env_vars = ",".join([f"{k}=\"{v}\"" for k, v in deploy_config["env_variables"].items()])
command = f"gcloud run deploy {service_name} --region={region} --project={project} --image={image} " \
f"--allow-unauthenticated --concurrency={concurrency} --set-env-vars={env_vars}"
run_and_output(command)
if __name__ == "__main__":
ARGS = parse_arguments()
if ARGS.env == "prod":
print("Are you sure you want to deploy to prod?")
x = input()
if x != "yes":
sys.exit()
DEPLOY_CONFIG = read_deployment_config(ARGS.env)
if ARGS.part in ["frontend", "all"]:
os.chdir(FRONTEND_DIR)
build_new_static(ARGS.env)
os.chdir(WEB_DIR)
deploy_static(ARGS.env)
if ARGS.part in ["api", "all"]:
os.chdir(WEB_DIR)
docker_build_and_push(DEPLOY_CONFIG)
deploy_to_cloud_run(DEPLOY_CONFIG)
```
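`read_deployment_config` above expects a per-environment `config.json` holding every key consumed by `docker_build_and_push` and `deploy_to_cloud_run`. A hypothetical example, shown as the dict the function would return (all values are placeholders, not the project's real settings):
```python
example_config = {
    "project": "my-gcp-project",            # GCP project id (placeholder)
    "region": "europe-west1",               # Cloud Run region (placeholder)
    "service_name": "hammergen-api",        # Cloud Run service (placeholder)
    "image_name": "hammergen",              # image pushed to Artifact Registry
    "ar_registry": "europe-west1-docker.pkg.dev",
    "ar_repository": "hammergen-repo",      # Artifact Registry repo (placeholder)
    "concurrency": 80,                      # requests per container instance
    "env_variables": {"HAMMERGEN_ENV": "staging"},
}
```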
#### File: deployment/k8s/build_images.py
```python
import argparse
import subprocess
from pathlib import Path
import docker
SCRIPT_DIR = Path(__file__).parent.absolute()
ROOT_DIR = SCRIPT_DIR.parent.parent.absolute()
FRONTEND_DIR = ROOT_DIR / "src" / "frontend"
WEB_DIR = ROOT_DIR / "src"
API_IMAGE_FULL_PATH = "dirac1234/hammergen"
DIST_IMAGE_FULL_PATH = "dirac1234/hammergen_dist"
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("env", choices=["prod", "staging"], help="Allowed values: prod, staging.")
return parser.parse_args()
def build_image(image_full_path, dockerfile, buildargs=None):
if not buildargs:
buildargs = {}
client = docker.from_env()
_, logs = client.images.build(
path=str(WEB_DIR),
tag=image_full_path,
dockerfile=dockerfile,
buildargs=buildargs
)
    for log in logs:
        for k, v in log.items():
            if k == "stream":
                print(v, end="")
            else:
                print(v)
def push_image(image_full_path):
run_and_output(f"docker push {image_full_path}")
def run_and_output(command):
output = subprocess.run(command, shell=True, capture_output=True)
print(output.stdout.decode())
print(output.stderr.decode())
def apply(yaml_file):
run_and_output(f"kubectl apply -f {yaml_file}")
if __name__ == "__main__":
ARGS = parse_arguments()
build_image(API_IMAGE_FULL_PATH, "Dockerfile")
# push_image(API_IMAGE_FULL_PATH)
if ARGS.env == "prod":
buildargs = {"build": "build_container_prod"}
else:
buildargs = {"build": "build_container_staging"}
build_image(DIST_IMAGE_FULL_PATH, "Dockerfile_build_dist", buildargs)
# push_image(DIST_IMAGE_FULL_PATH)
```
#### File: wfrp-hammergen/migration/atlas_api.py
```python
import argparse
import json
import requests
from requests.auth import HTTPDigestAuth
PROJECT_ID = '5c3a00fc55385501d1df9fad'
BASE_URL = 'https://cloud.mongodb.com/api/atlas/v1.0'
URL = f'/groups/{PROJECT_ID}/whitelist'
def parse_input():
    parser = argparse.ArgumentParser()
    parser.add_argument('public_key', help="API public key")
    parser.add_argument('private_key', help="API private key")
    parser.add_argument('ip_list', help="JSON file with the IPs to whitelist")
    return parser.parse_args()
def main(ip_list_file, public_key, private_key):
with open(ip_list_file, 'rt') as file:
file_data = json.loads(file.read())
post_data = [{"ipAddress": x, 'comment': file_data['comment']} for x in file_data['ip_list']]
resp = requests.post(BASE_URL + URL, auth=HTTPDigestAuth(public_key, private_key), json=post_data)
# ret = requests.get(BASE_URL + URL, auth=HTTPDigestAuth(public_key, private_key))
return resp.content.decode()
if __name__ == '__main__':
ARGS = parse_input()
http_ret = main(ARGS.ip_list, ARGS.public_key, ARGS.private_key)
http_ret_json = json.loads(http_ret)
print(json.dumps(http_ret_json, indent=2, sort_keys=True))
```
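`main` above reads the `ip_list` argument as a JSON file carrying an `ip_list` array and one shared `comment`. A hypothetical input file and the whitelist payload it produces (IPs taken from the documentation range):
```python
import json

# hypothetical file contents; the real ip_list file is not in the repo
ip_list_file_contents = {
    "comment": "office egress IPs",
    "ip_list": ["203.0.113.10", "203.0.113.11"],
}
# main() turns this into the POST body sent to the Atlas whitelist endpoint
post_data = [{"ipAddress": x, "comment": ip_list_file_contents["comment"]}
             for x in ip_list_file_contents["ip_list"]]
print(json.dumps(post_data, indent=2))
```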
#### File: src/api/request_wrappers.py
```python
from functools import wraps
from flask import request
from flask_jwt_extended import get_jwt_identity, get_jwt
from werkzeug.exceptions import BadRequest
from schema import SchemaError
from . import responses as r
from .user.claims import UserClaims
def get_request_data(schema):
def inner_decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
try:
input_data = request.get_json()
schema.validate(input_data)
return fn(request_data=input_data, *args, **kwargs)
except BadRequest:
return r.api_response(r.BAD_REQUEST_CODE, r.BAD_REQUEST_MSG, r.BAD_PARAMS_HTTP)
except SchemaError as exp:
return r.api_response(r.BAD_PARAMS_CODE, f"{r.BAD_PARAMS_MSG} {exp}", r.BAD_PARAMS_HTTP)
return wrapper
return inner_decorator
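# A minimal usage sketch (hypothetical Flask view; `Schema` comes from the
# `schema` package imported above):
#
#   from schema import Schema
#   new_user_schema = Schema({"name": str, "email": str})
#
#   @app.route("/users", methods=["POST"])
#   @get_request_data(new_user_schema)
#   def create_user(request_data):
#       ...  # request_data is the validated JSON body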
def get_user(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
token_identity = get_jwt_identity()
if token_identity:
user_id = token_identity["id"]
shared_acc = token_identity["shared_accounts"]
claims = get_jwt().get("uc") or []
else:
user_id = "anonymous"
shared_acc = []
claims = []
        is_master_admin = UserClaims.MASTER_ADMIN.value in claims
user = {"id": user_id, "master_admin": is_master_admin, "shared_acc": shared_acc}
return fn(user=user, *args, **kwargs)
return wrapper
``` |
{
"source": "jmilou/astroquery",
"score": 2
} |
#### File: cadc/tests/test_cadctap_remote.py
```python
import pytest
import os
from datetime import datetime
from astropy.tests.helper import remote_data
from astroquery.cadc import Cadc
from astropy.coordinates import SkyCoord
@remote_data
class TestCadcClass:
# now write tests for each method here
def test_query_region(self):
cadc = Cadc()
result = cadc.query_region('08h45m07.5s +54d18m00s', collection='CFHT')
# do some manipulation of the results. Below it's filtering out based
# on target name but other manipulations are possible.
assert len(result) > 0
urls = cadc.get_data_urls(result[result['target_name'] == 'Nr3491_1'])
assert len(urls) > 0
# urls are a subset of the results that match target_name==Nr3491_1
assert len(result) >= len(urls)
urls_data_only = len(urls)
        # now get the auxiliary files too
urls = cadc.get_data_urls(result[result['target_name'] == 'Nr3491_1'],
include_auxiliaries=True)
assert urls_data_only <= len(urls)
# the same result should be obtained by querying the entire region
# and filtering out on the CFHT collection
result2 = cadc.query_region('08h45m07.5s +54d18m00s')
assert len(result) == len(result2[result2['collection'] == 'CFHT'])
# search for a target
results = cadc.query_region(SkyCoord.from_name('M31'))
assert len(results) > 20
def test_query_name(self):
cadc = Cadc()
result1 = cadc.query_name('M31')
assert len(result1) > 20
# test case insensitive
result2 = cadc.query_name('m31')
assert len(result1) == len(result2)
def test_query(self):
cadc = Cadc()
result = cadc.query(
"select count(*) from caom2.Observation where target_name='M31'")
assert 1000 < result[0][0]
# test that no proprietary results are returned when not logged in
now = datetime.utcnow()
query = "select top 1 * from caom2.Plane where " \
"metaRelease>'{}'".format(now.strftime('%Y-%m-%dT%H:%M:%S.%f'))
result = cadc.query(query)
assert len(result) == 0
@pytest.mark.skipif(('CADC_USER' not in os.environ or
'CADC_PASSWD' not in os.environ),
reason='Requires real CADC user/password (CADC_USER '
'and CADC_PASSWD environment variables)')
def test_login(self):
cadc = Cadc()
now = datetime.utcnow()
cadc.login(os.environ['CADC_USER'], os.environ['CADC_PASSWD'])
query = "select top 1 * from caom2.Plane where " \
"metaRelease>'{}'".format(now.strftime('%Y-%m-%dT%H:%M:%S.%f'))
result = cadc.query(query)
assert len(result) == 1
```
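Outside the test harness, the same calls work interactively; a minimal sketch distilled from `test_query_region` above (the coordinates, collection and target name are the ones the test uses):
```python
from astroquery.cadc import Cadc

cadc = Cadc()
result = cadc.query_region('08h45m07.5s +54d18m00s', collection='CFHT')
urls = cadc.get_data_urls(result[result['target_name'] == 'Nr3491_1'])
print(len(result), len(urls))
```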
#### File: gaia/tests/test_gaiatap.py
```python
import unittest
import os
import pytest
from astroquery.gaia.core import GaiaClass
from astroquery.gaia.tests.DummyTapHandler import DummyTapHandler
from astroquery.utils.tap.conn.tests.DummyConnHandler import DummyConnHandler
from astroquery.utils.tap.conn.tests.DummyResponse import DummyResponse
import astropy.units as u
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.units import Quantity
import numpy as np
from astroquery.utils.tap.xmlparser import utils
from astroquery.utils.tap.core import TapPlus
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
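# DummyTapHandler is a hand-rolled mock: each TAP method it implements
# records its own name and parameters, and check_call(method, parameters)
# asserts that the last recorded call matches. A sketch of the idea
# (hypothetical, not the actual test helper):
#
#   class SketchDummyHandler:
#       def reset(self):
#           self.last_method, self.last_parameters = None, {}
#       def load_tables(self, only_names=False, include_shared_tables=False,
#                       verbose=False):
#           self.last_method = 'load_tables'
#           self.last_parameters = {'only_names': only_names,
#                                   'include_shared_tables': include_shared_tables,
#                                   'verbose': verbose}
#       def check_call(self, method, parameters):
#           assert self.last_method == method
#           assert self.last_parameters == parameters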
class TestTap(unittest.TestCase):
def test_load_tables(self):
dummyTapHandler = DummyTapHandler()
tap = GaiaClass(dummyTapHandler)
# default parameters
parameters = {}
parameters['only_names'] = False
parameters['include_shared_tables'] = False
parameters['verbose'] = False
tap.load_tables()
dummyTapHandler.check_call('load_tables', parameters)
# test with parameters
dummyTapHandler.reset()
parameters = {}
parameters['only_names'] = True
parameters['include_shared_tables'] = True
parameters['verbose'] = True
tap.load_tables(True, True, True)
dummyTapHandler.check_call('load_tables', parameters)
def test_load_table(self):
dummyTapHandler = DummyTapHandler()
tap = GaiaClass(dummyTapHandler)
# default parameters
parameters = {}
parameters['table'] = 'table'
parameters['verbose'] = False
tap.load_table('table')
dummyTapHandler.check_call('load_table', parameters)
# test with parameters
dummyTapHandler.reset()
parameters = {}
parameters['table'] = 'table'
parameters['verbose'] = True
tap.load_table('table', verbose=True)
dummyTapHandler.check_call('load_table', parameters)
def test_launch_sync_job(self):
dummyTapHandler = DummyTapHandler()
tap = GaiaClass(dummyTapHandler)
query = "query"
# default parameters
parameters = {}
parameters['query'] = query
parameters['name'] = None
parameters['output_file'] = None
parameters['output_format'] = 'votable'
parameters['verbose'] = False
parameters['dump_to_file'] = False
parameters['upload_resource'] = None
parameters['upload_table_name'] = None
tap.launch_job(query)
dummyTapHandler.check_call('launch_job', parameters)
# test with parameters
dummyTapHandler.reset()
name = 'name'
output_file = 'output'
output_format = 'format'
verbose = True
dump_to_file = True
upload_resource = 'upload_res'
upload_table_name = 'upload_table'
parameters['query'] = query
parameters['name'] = name
parameters['output_file'] = output_file
parameters['output_format'] = output_format
parameters['verbose'] = verbose
parameters['dump_to_file'] = dump_to_file
parameters['upload_resource'] = upload_resource
parameters['upload_table_name'] = upload_table_name
tap.launch_job(query,
name=name,
output_file=output_file,
output_format=output_format,
verbose=verbose,
dump_to_file=dump_to_file,
upload_resource=upload_resource,
upload_table_name=upload_table_name)
dummyTapHandler.check_call('launch_job', parameters)
def test_launch_async_job(self):
dummyTapHandler = DummyTapHandler()
tap = GaiaClass(dummyTapHandler)
query = "query"
# default parameters
parameters = {}
parameters['query'] = query
parameters['name'] = None
parameters['output_file'] = None
parameters['output_format'] = 'votable'
parameters['verbose'] = False
parameters['dump_to_file'] = False
parameters['background'] = False
parameters['upload_resource'] = None
parameters['upload_table_name'] = None
tap.launch_job_async(query)
dummyTapHandler.check_call('launch_job_async', parameters)
# test with parameters
dummyTapHandler.reset()
name = 'name'
output_file = 'output'
output_format = 'format'
verbose = True
dump_to_file = True
background = True
upload_resource = 'upload_res'
upload_table_name = 'upload_table'
parameters['query'] = query
parameters['name'] = name
parameters['output_file'] = output_file
parameters['output_format'] = output_format
parameters['verbose'] = verbose
parameters['dump_to_file'] = dump_to_file
parameters['background'] = background
parameters['upload_resource'] = upload_resource
parameters['upload_table_name'] = upload_table_name
tap.launch_job_async(query,
name=name,
output_file=output_file,
output_format=output_format,
verbose=verbose,
dump_to_file=dump_to_file,
background=background,
upload_resource=upload_resource,
upload_table_name=upload_table_name)
dummyTapHandler.check_call('launch_job_async', parameters)
def test_list_async_jobs(self):
dummyTapHandler = DummyTapHandler()
tap = GaiaClass(dummyTapHandler)
# default parameters
parameters = {}
parameters['verbose'] = False
tap.list_async_jobs()
dummyTapHandler.check_call('list_async_jobs', parameters)
# test with parameters
dummyTapHandler.reset()
parameters['verbose'] = True
tap.list_async_jobs(verbose=True)
dummyTapHandler.check_call('list_async_jobs', parameters)
def test_query_object(self):
connHandler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=connHandler)
tap = GaiaClass(tapplus)
# Launch response: we use default response because the query contains decimals
responseLaunchJob = DummyResponse()
responseLaunchJob.set_status_code(200)
responseLaunchJob.set_message("OK")
jobDataFile = data_path('job_1.vot')
jobData = utils.read_file_content(jobDataFile)
responseLaunchJob.set_data(method='POST',
context=None,
body=jobData,
headers=None)
# The query contains decimals: force default response
connHandler.set_default_response(responseLaunchJob)
sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree), frame='icrs')
with pytest.raises(ValueError) as err:
tap.query_object(sc)
assert "Missing required argument: 'width'" in err.value.args[0]
width = Quantity(12, u.deg)
with pytest.raises(ValueError) as err:
tap.query_object(sc, width=width)
assert "Missing required argument: 'height'" in err.value.args[0]
height = Quantity(10, u.deg)
table = tap.query_object(sc, width=width, height=height)
assert len(table) == 3, \
"Wrong job results (num rows). Expected: %d, found %d" % \
(3, len(table))
self.__check_results_column(table,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(table,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(table,
'source_id',
'source_id',
None,
np.object)
self.__check_results_column(table,
'table1_oid',
'table1_oid',
None,
np.int32)
# by radius
radius = Quantity(1, u.deg)
table = tap.query_object(sc, radius=radius)
assert len(table) == 3, \
"Wrong job results (num rows). Expected: %d, found %d" % \
(3, len(table))
self.__check_results_column(table,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(table,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(table,
'source_id',
'source_id',
None,
np.object)
self.__check_results_column(table,
'table1_oid',
'table1_oid',
None,
np.int32)
def test_query_object_async(self):
connHandler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=connHandler)
tap = GaiaClass(tapplus)
jobid = '12345'
# Launch response
responseLaunchJob = DummyResponse()
responseLaunchJob.set_status_code(303)
responseLaunchJob.set_message("OK")
# list of list (httplib implementation for headers in response)
launchResponseHeaders = [
['location', 'http://test:1111/tap/async/' + jobid]
]
responseLaunchJob.set_data(method='POST',
context=None,
body=None,
headers=launchResponseHeaders)
connHandler.set_default_response(responseLaunchJob)
# Phase response
responsePhase = DummyResponse()
responsePhase.set_status_code(200)
responsePhase.set_message("OK")
responsePhase.set_data(method='GET',
context=None,
body="COMPLETED",
headers=None)
req = "async/" + jobid + "/phase"
connHandler.set_response(req, responsePhase)
# Results response
responseResultsJob = DummyResponse()
responseResultsJob.set_status_code(200)
responseResultsJob.set_message("OK")
jobDataFile = data_path('job_1.vot')
jobData = utils.read_file_content(jobDataFile)
responseResultsJob.set_data(method='GET',
context=None,
body=jobData,
headers=None)
req = "async/" + jobid + "/results/result"
connHandler.set_response(req, responseResultsJob)
sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree), frame='icrs')
width = Quantity(12, u.deg)
height = Quantity(10, u.deg)
table = tap.query_object_async(sc, width=width, height=height)
assert len(table) == 3, \
"Wrong job results (num rows). Expected: %d, found %d" % \
(3, len(table))
self.__check_results_column(table,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(table,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(table,
'source_id',
'source_id',
None,
np.object)
self.__check_results_column(table,
'table1_oid',
'table1_oid',
None,
np.int32)
# by radius
radius = Quantity(1, u.deg)
table = tap.query_object_async(sc, radius=radius)
assert len(table) == 3, \
"Wrong job results (num rows). Expected: %d, found %d" % \
(3, len(table))
self.__check_results_column(table,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(table,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(table,
'source_id',
'source_id',
None,
np.object)
self.__check_results_column(table,
'table1_oid',
'table1_oid',
None,
np.int32)
def test_cone_search_sync(self):
connHandler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=connHandler)
tap = GaiaClass(tapplus)
# Launch response: we use default response because the query contains decimals
responseLaunchJob = DummyResponse()
responseLaunchJob.set_status_code(200)
responseLaunchJob.set_message("OK")
jobDataFile = data_path('job_1.vot')
jobData = utils.read_file_content(jobDataFile)
responseLaunchJob.set_data(method='POST',
context=None,
body=jobData,
headers=None)
ra = 19.0
dec = 20.0
sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
radius = Quantity(1.0, u.deg)
connHandler.set_default_response(responseLaunchJob)
job = tap.cone_search(sc, radius)
assert job is not None, "Expected a valid job"
assert job.async_ is False, "Expected a synchronous job"
assert job.get_phase() == 'COMPLETED', \
"Wrong job phase. Expected: %s, found %s" % \
('COMPLETED', job.get_phase())
assert job.failed is False, "Wrong job status (set Failed = True)"
# results
results = job.get_results()
assert len(results) == 3, \
"Wrong job results (num rows). Expected: %d, found %d" % \
(3, len(results))
self.__check_results_column(results,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(results,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(results,
'source_id',
'source_id',
None,
np.object)
self.__check_results_column(results,
'table1_oid',
'table1_oid',
None,
np.int32)
def test_cone_search_async(self):
connHandler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=connHandler)
tap = GaiaClass(tapplus)
jobid = '12345'
# Launch response
responseLaunchJob = DummyResponse()
responseLaunchJob.set_status_code(303)
responseLaunchJob.set_message("OK")
# list of list (httplib implementation for headers in response)
launchResponseHeaders = [
['location', 'http://test:1111/tap/async/' + jobid]
]
responseLaunchJob.set_data(method='POST',
context=None,
body=None,
headers=launchResponseHeaders)
ra = 19
dec = 20
sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
radius = Quantity(1.0, u.deg)
connHandler.set_default_response(responseLaunchJob)
# Phase response
responsePhase = DummyResponse()
responsePhase.set_status_code(200)
responsePhase.set_message("OK")
responsePhase.set_data(method='GET',
context=None,
body="COMPLETED",
headers=None)
req = "async/" + jobid + "/phase"
connHandler.set_response(req, responsePhase)
# Results response
responseResultsJob = DummyResponse()
responseResultsJob.set_status_code(200)
responseResultsJob.set_message("OK")
jobDataFile = data_path('job_1.vot')
jobData = utils.read_file_content(jobDataFile)
responseResultsJob.set_data(method='GET',
context=None,
body=jobData,
headers=None)
req = "async/" + jobid + "/results/result"
connHandler.set_response(req, responseResultsJob)
job = tap.cone_search_async(sc, radius)
assert job is not None, "Expected a valid job"
assert job.async_ is True, "Expected an asynchronous job"
assert job.get_phase() == 'COMPLETED', \
"Wrong job phase. Expected: %s, found %s" % \
('COMPLETED', job.get_phase())
assert job.failed is False, "Wrong job status (set Failed = True)"
# results
results = job.get_results()
assert len(results) == 3, \
"Wrong job results (num rows). Expected: %d, found %d" % \
(3, len(results))
self.__check_results_column(results,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(results,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(results,
'source_id',
'source_id',
None,
np.object)
self.__check_results_column(results,
'table1_oid',
'table1_oid',
None,
np.int32)
def __check_results_column(self, results, columnName, description, unit,
dataType):
c = results[columnName]
assert c.description == description, \
"Wrong description for results column '%s'. Expected: '%s', found '%s'" % \
(columnName, description, c.description)
assert c.unit == unit, \
"Wrong unit for results column '%s'. Expected: '%s', found '%s'" % \
(columnName, unit, c.unit)
assert c.dtype == dataType, \
"Wrong dataType for results column '%s'. Expected: '%s', found '%s'" % \
(columnName, dataType, c.dtype)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
``` |
{
"source": "jmilou/hcipy",
"score": 3
} |
#### File: hcipy/propagation/fresnel.py
```python
import numpy as np
from ..optics import Wavefront, AgnosticOpticalElement, make_agnostic_forward, make_agnostic_backward
from ..field import Field, evaluate_supersampled
from ..fourier import FastFourierTransform, make_fft_grid, FourierFilter
class FresnelPropagator(AgnosticOpticalElement):
'''The monochromatic Fresnel propagator for scalar fields.
The Fresnel propagator is implemented as described in [Goodman2005]_.
    .. [Goodman2005] Goodman, J.W., 2005, Introduction to Fourier Optics. Roberts and Company Publishers.
Parameters
----------
input_grid : anything
This argument is ignored. The input grid is taken from the incoming wavefront.
distance : scalar
The distance to propagate
num_oversampling : int
The number of times the transfer function is oversampled. Default is 2.
wavelength : scalar
The wavelength of the wavefront.
refractive_index : scalar
The refractive index of the medium that the wavefront is propagating in.
Raises
------
ValueError
If the `input_grid` is not regular and Cartesian.
'''
def __init__(self, input_grid, distance, num_oversampling=2, refractive_index=1):
self._distance = distance
self._num_oversampling = num_oversampling
self._refractive_index = refractive_index
AgnosticOpticalElement.__init__(self, grid_dependent=True, wavelength_dependent=True)
def make_instance(self, instance_data, input_grid, output_grid, wavelength):
if not input_grid.is_regular or not input_grid.is_('cartesian'):
raise ValueError('The input grid must be a regular, Cartesian grid.')
k = 2 * np.pi / wavelength * self.evaluate_parameter(self.refractive_index, input_grid, output_grid, wavelength)
L_max = np.max(input_grid.dims * input_grid.delta)
if np.any(input_grid.delta < wavelength * self.distance / L_max):
def transfer_function(fourier_grid):
enlarged_grid = make_fft_grid(fourier_grid)
fft_upscale = FastFourierTransform(enlarged_grid)
def impulse_response(grid):
r_squared = grid.x**2 + grid.y**2
return Field(np.exp(1j * k * self.distance) / (1j * wavelength * self.distance) * np.exp(1j * k * r_squared / (2 * self.distance)), grid)
impulse_response = evaluate_supersampled(impulse_response, enlarged_grid, self.num_oversampling)
return fft_upscale.forward(impulse_response)
else:
def transfer_function_native(fourier_grid):
k_squared = fourier_grid.as_('polar').r**2
phase_factor = np.exp(1j * k * self.distance)
return Field(np.exp(-0.5j * self.distance * k_squared / k) * phase_factor, fourier_grid)
def transfer_function(fourier_grid):
return evaluate_supersampled(transfer_function_native, fourier_grid, self.num_oversampling)
instance_data.fourier_filter = FourierFilter(input_grid, transfer_function, q=2)
@property
def distance(self):
return self._distance
@distance.setter
def distance(self, distance):
self._distance = distance
self.clear_cache()
@property
def num_oversampling(self):
return self._num_oversampling
@num_oversampling.setter
def num_oversampling(self, num_oversampling):
self._num_oversampling = num_oversampling
self.clear_cache()
@property
def refractive_index(self):
return self._refractive_index
@refractive_index.setter
def refractive_index(self, refractive_index):
self._refractive_index = refractive_index
self.clear_cache()
def get_input_grid(self, output_grid, wavelength):
return output_grid
def get_output_grid(self, input_grid, wavelength):
return input_grid
@make_agnostic_forward
def forward(self, instance_data, wavefront):
'''Propagate a wavefront forward by a certain distance.
Parameters
----------
wavefront : Wavefront
The incoming wavefront.
Returns
-------
Wavefront
The wavefront after the propagation.
'''
filtered = instance_data.fourier_filter.forward(wavefront.electric_field)
return Wavefront(filtered, wavefront.wavelength, wavefront.input_stokes_vector)
@make_agnostic_backward
def backward(self, instance_data, wavefront):
'''Propagate a wavefront backward by a certain distance.
Parameters
----------
wavefront : Wavefront
The incoming wavefront.
Returns
-------
Wavefront
The wavefront after the propagation.
'''
filtered = instance_data.fourier_filter.backward(wavefront.electric_field)
return Wavefront(filtered, wavefront.wavelength, wavefront.input_stokes_vector)
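# A minimal usage sketch (assuming the usual hcipy helpers; the grid and
# distance values are illustrative only):
#
#   from hcipy import make_pupil_grid, circular_aperture, Wavefront
#   pupil_grid = make_pupil_grid(256, 0.01)             # 256 px, 1 cm wide
#   wf = Wavefront(circular_aperture(0.005)(pupil_grid), wavelength=500e-9)
#   prop = FresnelPropagator(pupil_grid, distance=0.1)  # propagate 10 cm
#   wf_out = prop.forward(wf)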
``` |
{
"source": "jmilou/image_utilities",
"score": 3
} |
#### File: jmilou/image_utilities/curve2map.py
```python
import numpy as np
from scipy.interpolate import interp1d
def create2dMap(values,inputRadii=None,maxRadius=None):
"""
This function takes a 1D radial distribution in input and builds a 2map
"""
nbValues=len(values)
if inputRadii==None:
inputRadii=np.arange(0,nbValues)
maxRadius=nbValues
else:
if maxRadius==None:
raise ValueError('You must provide a maximum radius')
imageAxis = np.arange(-maxRadius/2,maxRadius/2)
x,y = np.meshgrid(imageAxis,imageAxis)
distmap = abs(x+1j*y)
# map2d = np.ndarray(distmap.shape)
radiusOK = np.isfinite(values)
func = interp1d(inputRadii[radiusOK],values[radiusOK],kind='cubic',
bounds_error=False,fill_value=np.nan)
map2d = func(distmap)
return map2d,distmap
```
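A quick usage sketch of `create2dMap`: with the default radii, a 1D profile sampled at radii 0..N-1 px becomes an N x N azimuthally symmetric image (the import path is an assumption).
```python
import numpy as np
from curve2map import create2dMap  # assumes this file is importable as-is

profile = np.exp(-np.arange(100) / 20.)   # illustrative decaying radial profile
map2d, distmap = create2dMap(profile)
print(map2d.shape)                        # (100, 100)
```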
#### File: jmilou/image_utilities/dtts_peak_finder.py
```python
from astropy.io import fits, ascii
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.modeling.functional_models import Gaussian2D
import mpfit
from fwhm import fwhm2sig,sig2fwhm
from scipy.ndimage import gaussian_filter, median_filter
import numpy as np
import photutils
class Dtts_peak_finder():
""" Object that analyses the DTTS images and finds the ones with a star,
and automatically detect if the LWE is there or not.
Attributes:
- image: the input image
Methods:
-
"""
# class variables
DTTS_gain = 2.7 #2.7 e/ADU (VLT-TRE-SPH-14690-626)
lam = 1.53e-6
px_scale = 11.5e-3 #in arcsec (from DTTS documentation: f/D=40.38 and pix=18micron)
def __init__(self,cube,background='auto'):
"""
Constructor of the class.
Input:
- cube: the DTTS cube to analyze
- background: 'auto' for automatic background detection and subtraction
or 'False' for no background subtraction
Output:
- nothing
"""
if not cube.ndim == 3 or cube.shape[1]!=32 or cube.shape[2]!=32:
raise TypeError('The input is not a 32x32 cube.')
self.nframes,self.ny,self.nx = cube.shape
# # We define a mask to measure the background noise: masked values are
# # pixels at more than 7 pixels from the edge or the first line and column
# # (which corresponds to a row/column of bad pixels). This mask is no longer
# # used in this state of the code.
# self.mask = np.zeros((32,32),dtype=bool)
# self.mask[7:-7,7:-7]=True
# self.mask[:,0]=True
# self.mask[0,:]=True
self.theoretical_fwhm = np.rad2deg(self.lam/8.)*3600/self.px_scale #(from DTTS documentation)
self.theoretical_sig = fwhm2sig(self.theoretical_fwhm)
x_vect = np.arange(0,self.nx)
y_vect = np.arange(0,self.ny)
self.x_array,self.y_array = np.meshgrid(x_vect,y_vect)
self.cube = np.copy(cube)
self.residuals = np.zeros_like(cube)
threshold_bck = 6. #threshold for the automatic background detection
max_cube = np.max(cube,axis=(1,2))
if background=='auto':
nbck = np.sum(max_cube<threshold_bck)
#print('Automatic selection of {0:d} frames as backgrounds'.format(nbck))
        elif background=='False':
            nbck = 0
            print('Background selection was de-activated')
        else:
            nbck = 0
            print('Background subtraction method not understood: {0}. It should be auto or False'.format(background))
if nbck>0:
plt.figure(0)
plt.semilogy(max_cube,label='star')
plt.semilogy(np.arange(self.nframes)[max_cube<threshold_bck],max_cube[max_cube<threshold_bck],'or',label='no star')
plt.legend(frameon=False)
# cube of background frames
self.bck_cube = self.cube[max_cube<threshold_bck,:,:]
print('Max background value: {0:.1f} ADU'.format(np.max(self.bck_cube)))
# std_bck is a 1d array that gives the 2D spatial RMS of each background
std_bck = sorted(np.std(self.bck_cube,axis=(1,2)))
# ref_std_bck is the median value of std_bck
ref_std_bck = std_bck[len(std_bck)//2]
# master_bck is the master background
self.master_bck = np.mean(self.bck_cube, axis=0)
# The reference background is the frame of bck_cube with
self.bck_ref = self.bck_cube[np.std(self.bck_cube,axis=(1,2)) == ref_std_bck][0,:,:]
self.sky_med = np.median(self.bck_ref)
self.sky_rms = np.median(np.std(self.bck_cube,axis=(1,2)))
else:
#print('No background subtraction')
self.master_bck = np.zeros((self.ny,self.nx))
self.sky_med = 0.
self.sky_rms = 1.
threshold_star = 15.
self.fit_result = {'AMP':np.ones(self.nframes)*np.nan,'X':np.ones(self.nframes)*np.nan,\
'FWHMX':np.ones(self.nframes)*np.nan,\
'Y':np.ones(self.nframes)*np.nan,'FWHMY':np.ones(self.nframes)*np.nan,\
'FWHM':np.ones(self.nframes)*np.nan,\
'THETA':np.ones(self.nframes)*np.nan,'ell':np.ones(self.nframes)*np.nan,\
'CHI2':np.ones(self.nframes)*np.nan,\
'CHI2_r':np.ones(self.nframes)*np.nan,\
'strength':np.ones(self.nframes)*np.nan, \
'threshold':np.ones(self.nframes)*np.nan}
self.fit_error = { 'AMP':np.ones(self.nframes)*np.nan,'X':np.ones(self.nframes)*np.nan,\
'Y':np.ones(self.nframes)*np.nan,'FWHMX':np.ones(self.nframes)*np.nan,\
'FWHMY':np.ones(self.nframes)*np.nan,'FWHM':np.ones(self.nframes)*np.nan,\
'THETA': np.ones(self.nframes)*np.nan,\
'ell':np.ones(self.nframes)*np.nan}
self.good_frames, = np.where(max_cube>=threshold_star)
def gauss2D_fit_erf(self,p,fjac=None, x=None,y=None, z=None,err=None):
'''
Computes the residuals to be minimized by mpfit, given a model and data.
'''
model = Gaussian2D(p[0],p[1],p[2],p[3],p[4],np.radians(p[5]))(x,y)
status = 0
return ([status, ((z-model)/err).ravel()])
def fit_gaussian(self,plot=True,verbose=False,save=None):
"""
Perform a fit of a 2D gaussian.
Input:
- plot: (optional) bool. If True, makes a plot of the image with
the contours of the gaussian
        - verbose: (optional) bool. If True, prints the verbose output of mpfit
- additional optional keywords can be 'amp', 'centerx', 'centery',
'sigmax','sigmay','fwhm' or 'theta' to set the value of the
first guess of the fit. theta must be between 0 and 90
- save: (optional) string with the name to save a pdf of the fit (only
valid if plot=True)
and a ds9 reg file (still to be implemented)
Output:
- fit_result: a dictionary with the parameters of the best fit.
The entries are 'AMP' 'X' 'FWHMX' 'Y' 'FWHMY' 'FWHM' 'THETA' 'ell'
'CHI2', 'CHI2_r','strength','threshold'
- fit_error: a dictionary with the parameters of the error on the previous parameters (same entries)
- chi2: value of the chi square
- chi2_reduced: value of the reduced chi squared
"""
for i in self.good_frames:
if verbose:
print('Processing image {0:d}'.format(i))
current_image = self.cube[i,:,:]-self.master_bck
# current_ma = np.ma.masked_array(current_image,mask=self.mask)
# sky_med = np.median(current_ma)
# sky_rms = np.std(current_ma)
sky_med = self.sky_med
sky_rms = self.sky_rms
if sky_med>5:
print('Warning, the sky level is high: {0:5.1f} ADU'.format(sky_med))
if sky_rms>5:
print('Warning, the background noise is high: {0:5.1f} ADU'.format(sky_rms))
# We first set a default guess
filtered_image = gaussian_filter(current_image,2)
argmax = np.argmax(filtered_image)
ymax,xmax = np.unravel_index(argmax,current_image.shape)
amp= np.max(current_image)
guess_dico = {'amp':amp,'centerx':xmax,'centery':ymax,'sigx':self.theoretical_sig,'sigy':self.theoretical_sig,'theta':0.}
# We also set default boundaries
parinfo =[{'fixed':0, 'limited':[1,1], 'limits':[0.,2*amp]}, # Force the amplitude to be >0
{'fixed':0, 'limited':[1,1], 'limits':[7,self.nx-7]}, # We restrain the center to be 1px
{'fixed':0, 'limited':[1,1], 'limits':[7,self.ny-7]}, # away from the edge
{'fixed':0, 'limited':[1,1], 'limits':[self.theoretical_sig,1.4*self.theoretical_sig]}, # sigma_x between 0.5 and 10px
{'fixed':0, 'limited':[1,1], 'limits':[self.theoretical_sig,1.4*self.theoretical_sig]}, # sigma_y between 0.5 and 10px
{'fixed':0, 'limited':[1,1], 'limits':[0,180.]}] # We limit theta beween 0 and 90 deg
fa = {'x': self.x_array, 'y': self.y_array, 'z':current_image, 'err':np.ones_like(current_image)*sky_rms}
guess = [guess_dico['amp'],guess_dico['centerx'],guess_dico['centery'],guess_dico['sigx'],guess_dico['sigy'],guess_dico['theta']]
m = mpfit.mpfit(self.gauss2D_fit_erf, guess, functkw=fa, parinfo=parinfo,quiet=1)# quiet=(not verbose)*1)
if m.status == 0:
if verbose:
print('Fit failed for frame {0:d}. Try to help the minimizer by providing a better first guess'.format(i))
else:
residuals = self.gauss2D_fit_erf(m.params,x=self.x_array,y=self.y_array,z=current_image,err=np.ones_like(current_image)*sky_rms)[1].reshape(current_image.shape)
self.residuals[i,:,:] = residuals #+self.bck #+sky_med
self.fit_result['CHI2'][i] = np.sum(residuals**2)
self.fit_result['CHI2_r'][i] = self.fit_result['CHI2'][i] / m.dof
self.fit_result['AMP'][i] = m.params[0]
self.fit_result['X'][i] = m.params[1]
self.fit_result['Y'][i] = m.params[2]
sig = np.array([m.params[3],m.params[4]])
sig_error=np.array([m.perror[3],m.perror[4]])
error_ell = 4/(np.sum(sig)**2)*np.sqrt(np.sum((sig*sig_error)**2))
fwhm = sig2fwhm(sig)
fwhm_error = sig2fwhm(sig_error)
self.fit_result['FWHMX'][i] = fwhm[0]
self.fit_result['FWHMY'][i] = fwhm[1]
self.fit_result['FWHM'][i] = np.mean(fwhm)
self.fit_result['THETA'][i] = m.params[5]
self.fit_result['ell'][i] = (sig[1]-sig[0])/np.mean(sig)
self.fit_error['AMP'][i] = m.perror[0]
self.fit_error['X'][i] = m.perror[1]
self.fit_error['Y'][i] = m.perror[2]
self.fit_error['FWHMX'][i] = fwhm_error[0]
self.fit_error['FWHMY'][i] = fwhm_error[1]
self.fit_error['FWHM'][i] = np.mean(fwhm_error)
self.fit_error['THETA'][i] = m.perror[5]
self.fit_error['ell'][i] = error_ell
separation_apertures = 1.63 # maxima of the first Airy ring
# we sample the angles with one point every px along the perimeter
thetas = np.linspace(0, 2*np.pi, int(2*np.pi*separation_apertures*self.theoretical_fwhm),endpoint=False)
x_centres = self.fit_result['X'][i] + separation_apertures*self.theoretical_fwhm*np.cos(thetas)
y_centres = self.fit_result['Y'][i] + separation_apertures*self.theoretical_fwhm*np.sin(thetas)
centres=[(x_centres[i],y_centres[i]) for i in range(len(x_centres))]
circular_apertures = photutils.CircularAperture(centres, \
r=self.theoretical_fwhm/2)
phot_table_circle = photutils.aperture_photometry(current_image, \
circular_apertures,error=np.ones_like(current_image)*sky_rms)
error_array = photutils.utils.calc_total_error(current_image, \
bkg_error=np.ones_like(current_image)*sky_rms, \
effective_gain=1/self.DTTS_gain)
phot_table_errors = photutils.aperture_photometry(error_array, \
circular_apertures)
self.LWE_threshold = np.median(phot_table_errors['aperture_sum'])
central_aperture = photutils.CircularAperture((self.fit_result['X'][i],\
self.fit_result['Y'][i]),r=self.theoretical_fwhm/2)
central_flux = photutils.aperture_photometry(current_image, \
central_aperture,error=np.ones_like(current_image)*sky_rms)['aperture_sum']
sorted_indices = np.argsort(phot_table_circle['aperture_sum'])
first_max = phot_table_circle['aperture_sum'][sorted_indices[-1]]
first_min = phot_table_circle['aperture_sum'][sorted_indices[0]]
                # we look for the second maximum: we skip the apertures that
                # are closer than 1 resel to the first maximum and keep the
                # brightest of the remaining ones
                idx = 2
                while separation_apertures*np.abs(thetas[sorted_indices[-1]]-thetas[sorted_indices[-idx]])<1.:
                    idx += 1
                secondary_max = phot_table_circle['aperture_sum'][sorted_indices[-idx]]
self.fit_result['strength'][i] = (first_max + secondary_max - 2*first_min)/2./(central_flux)
self.fit_result['threshold'][i] = self.LWE_threshold / (central_flux)
# self.fit_result['CHI2_r'][i] = self.fit_result['CHI2_r'][i] / central_flux * (np.pi*self.theoretical_fwhm**2)
if verbose:
print('X={0:4.2f}+/-{1:4.2f} Y={2:4.2f}+/-{3:4.2f} FWHM={4:3.2f}+/-{5:4.2f} ell={6:4.2f}+/-{7:4.2f}'.format(self.fit_result['X'][i],\
self.fit_error['X'][i],self.fit_result['Y'][i],self.fit_error['Y'][i],self.fit_result['FWHM'][i],self.fit_error['FWHM'][i],self.fit_result['ell'][i],self.fit_error['ell'][i],))
print('AMP={0:4.2e}+/-{1:3.2e} theta={2:3.1f}+/-{3:3.1f}deg SKY={4:4.2f}+/-{5:4.2f}'.format(self.fit_result['AMP'][i],\
self.fit_error['AMP'][i],self.fit_result['THETA'][i],self.fit_error['THETA'][i],sky_med,sky_rms))
print('DOF={0:d} CHI2={1:.1f} CHI2_r={2:.1f}'.format(m.dof,self.fit_result['CHI2'][i],self.fit_result['CHI2_r'][i]))
if plot:
plt.close(1)
fig = plt.figure(1, figsize=(7.5,3))
gs = gridspec.GridSpec(1,3, height_ratios=[1], width_ratios=[1,1,0.06])
gs.update(left=0.1, right=0.9, bottom=0.1, top=0.93, wspace=0.2, hspace=0.03)
ax1 = plt.subplot(gs[0,0]) # Area for the first plot
ax2 = plt.subplot(gs[0,1]) # Area for the second plot
ax3 = plt.subplot(gs[0,2]) # Area for the second plot
im = ax1.imshow(current_image,cmap='CMRmap',origin='lower', interpolation='nearest',\
extent=[np.min(self.x_array),np.max(self.x_array),np.min(self.y_array),np.max(self.y_array)],vmin=np.nanmin(current_image),vmax=np.nanmax(current_image))
ax1.set_xlabel('X in px')
ax1.set_ylabel('Y in px')
ax1.contour(self.x_array,self.y_array,sky_med+Gaussian2D(m.params[0],\
m.params[1],m.params[2],m.params[3],m.params[4],np.radians(m.params[5]))(self.x_array,self.y_array),3,colors='w')
ax1.grid(True,c='w')
ax2.imshow(residuals,cmap='CMRmap',origin='lower', interpolation='nearest',\
extent=[np.min(self.x_array),np.max(self.x_array),np.min(self.y_array),np.max(self.y_array)],vmin=np.nanmin(current_image),vmax=np.nanmax(current_image))
ax2.set_xlabel('X in px')
ax2.grid(True,c='w')
fig.colorbar(im, cax=ax3)
if save is not None:
fig.savefig(save+'_{0:d}.pdf'.format(i))
plt.figure(1)
plt.clf()
undetected_strength = [i for i,s in enumerate(self.fit_result['strength'][self.good_frames]) if s < self.fit_result['threshold'][self.good_frames][i]]
detected_strength = [i for i,s in enumerate(self.fit_result['strength'][self.good_frames]) if s >= self.fit_result['threshold'][self.good_frames][i]]
plt.plot(self.good_frames[undetected_strength],self.fit_result['strength'][self.good_frames][undetected_strength],'g.',label='undetected strength')
plt.plot(self.good_frames[detected_strength],self.fit_result['strength'][self.good_frames][detected_strength],'r.',label='detected strength')
plt.plot(self.good_frames, self.fit_result['threshold'][self.good_frames],'k:',linewidth=1.0,label='Threshold')
plt.xlabel('Frame number')
plt.ylabel('Asymmetry in %')
plt.legend(frameon=False,loc='best')
return
if __name__ == '__main__':
# import vip
# ds9=vip.fits.vipDS9()
print('OK')
# cube = fits.getdata('/Users/jmilli/Documents/SPHERE/Sparta/2017-03-19/sparta_DTTS_cube_2017-03-19.fits')
# cube = cube[5700:6100,:,:]
# DTTS_peak_finder = Dtts_peak_finder(cube)
# DTTS_peak_finder.fit_gaussian(verbose=False,plot=False)
# plt.plot(DTTS_peak_finder.fit_result['CHI2_r'][DTTS_peak_finder.good_frames])
# plt.plot(DTTS_peak_finder.fit_result['strength'][DTTS_peak_finder.good_frames],label=)
```
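The commented-out `__main__` block hints at the intended workflow; tidied into a hypothetical sketch (the FITS path and import path are placeholders):
```python
from astropy.io import fits
import matplotlib.pyplot as plt
from dtts_peak_finder import Dtts_peak_finder  # assumed import path

cube = fits.getdata('sparta_DTTS_cube.fits')   # placeholder path; 32x32 frames
finder = Dtts_peak_finder(cube, background='auto')
finder.fit_gaussian(verbose=False, plot=False)
# frames with a detected star and their measured LWE asymmetry strength
plt.plot(finder.good_frames,
         finder.fit_result['strength'][finder.good_frames], '.')
plt.show()
```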
#### File: jmilou/image_utilities/radial_data.py
```python
import sys
import numpy as np
from scipy.interpolate import interp1d
from scipy.stats import norm,t
#sys.path.append('/Users/jmilli/Dropbox/lib_py/image_utilities') # add path to our file
#from image_tools import *
# 2017-11-24 JMi: adapted the case of an odd image size
# 2015-02-10 JMi: changed the definition of radial
# 2010-03-10 19:22 IJC: Ported to python from Matlab
# 2005/12/19 Added 'working_region' option (IJC)
# 2005/12/15 Switched order of outputs (IJC)
# 2005/12/12 IJC: Removed decifact, changed name, wrote comments.
# 2005/11/04 by <NAME> at the Jet Propulsion Laboratory
class Radial_data():
""" Object containing some radial properties of the image
INPUT:
------
data - whatever data you are radially averaging. Data is
binned into a series of annuli of width 'annulus_width'
pixels.
annulus_width - width of each annulus. Default is 1.
mask - array of same size as 'data', with zeros at
whichever 'data' points you don't want included
in the radial data computations.
x,y - coordinate system in which the data exists (used to set
the center of the data). By default, these are set to
integer meshgrids
rmax -- maximum radial value over which to compute statistics
OUTPUT:
-------
r - a data structure containing the following statistics, computed across each annulus:
.r - the radial coordinate used (mean radius of the pixels used
in the annulus)
.mean - mean of the data in the annulus
.std - standard deviation of the data in the annulus
.median - median value in the annulus
.max - maximum value in the annulus
.min - minimum value in the annulus
.numel - number of elements in the annulus
"""
def __init__(self,data,annulus_width=1,mask=None,xvect=None,yvect=None,rmax=None):
"""
INPUT:
------
data - whatever data you are radially averaging. Data is
binned into a series of annuli of width 'annulus_width'
pixels.
annulus_width - width of each annulus. Default is 1.
mask - array of same size as 'data', with zeros at
whichever 'data' points you don't want included
in the radial data computations.
x,y - coordinate system in which the data exists (used to set
the center of the data). By default, these are set to
integer meshgrids
rmax -- maximum radial value over which to compute statistics
"""
data = np.array(data)
if len(data.shape) != 2 :
raise ValueError('The input array should be a 2D image')
if mask is None:
mask = np.ones(data.shape,bool)
self.npix, self.npiy = data.shape
if xvect is None or yvect is None:
if np.mod(self.npix,2)==0:
xvect = np.arange(-self.npix/2.,self.npix/2.)
else:
xvect = np.arange(-self.npix/2.,self.npix/2.)+0.5
if np.mod(self.npiy,2)==0:
yvect = np.arange(-self.npiy/2.,self.npiy/2.)
else:
yvect = np.arange(-self.npiy/2.,self.npiy/2.)+0.5
# xvect = np.arange(-self.npix/2.,self.npix/2.)
# yvect = np.arange(-self.npiy/2.,self.npiy/2.)
xmap,ymap = np.meshgrid(xvect,yvect)
self.distmap = np.abs(xmap+1j*ymap)
        if rmax is None:
rmax = np.max(self.distmap[mask])
#---------------------
# Prepare the data container
#---------------------
        dr = np.abs(xmap[0,0] - xmap[0,1]) * annulus_width
radial = np.arange(rmax/dr)*dr + dr/2. # this is changed later (JMi)
nrad = len(radial)
self.mean = np.zeros(nrad)
self.std = np.zeros(nrad)
self.median = np.zeros(nrad)
self.numel = np.zeros(nrad)
self.max = np.zeros(nrad)
self.min = np.zeros(nrad)
self.r = radial
self.noisemap = np.empty(data.shape)
self.azimuthalmedianmap = np.empty(data.shape)
self.noisemap.fill(np.nan)
#---------------------
# Loop through the bins
#---------------------
for irad in range(nrad): #= 1:numel(radial)
minrad = irad*dr
maxrad = minrad + dr
thisindex = (self.distmap>=minrad) * (self.distmap<maxrad) * mask
if not thisindex.ravel().any():
self.mean[irad] = np.nan
self.std[irad] = np.nan
self.median[irad] = np.nan
self.numel[irad] = np.nan
self.max[irad] = np.nan
self.min[irad] = np.nan
else:
self.r[irad] = self.distmap[thisindex].mean()
self.mean[irad] = data[thisindex].mean()
self.std[irad] = data[thisindex].std()
self.median[irad] = np.median(data[thisindex])
self.numel[irad] = data[thisindex].size
self.max[irad] = data[thisindex].max()
self.min[irad] = data[thisindex].min()
self.noisemap[thisindex] = self.std[irad]
self.azimuthalmedianmap[thisindex] = self.median[irad]
def get_noise_function(self,fwhm=None,sigma=5.,curve1d=True,verbose=True):
"""
Returns a function that returns the noise as a function of the separation.
In case the keyword fwhm is set then the penalty term from the theory of
small sample statistics (Mawet et al 2014) is included
in the noise term.
"""
if fwhm is not None:
if verbose:
print('You have included the small sample correction ! That is great !')
noise_curve_corrected=sigma*self.std*self.get_penalty(fwhm,sigma,
curve1d=curve1d,verbose=verbose)
id_ok = np.isfinite(noise_curve_corrected) & (noise_curve_corrected>0)
return interp1d(self.r[id_ok],noise_curve_corrected[id_ok],kind='linear',
bounds_error=False,fill_value=np.nan)
else:
if verbose:
print('You have not included the small sample correction ! Shame ! ')
return interp1d(self.r,sigma*self.std,kind='cubic',bounds_error=False,fill_value=np.nan)
def get_noise_map(self,fwhm=None,sigma=5.):
"""
Returns a 2D noise map corresponding to the 1D profile made 2D.
In case the keyword fwhm is set then the penalty term from the theory of
small sample statistics (Mawet et al 2014) is included
in the noise map, with a number of dof corresponding to the 2D case.
"""
noise_func=self.get_noise_function(fwhm=fwhm,sigma=sigma,curve1d=False)
noisemap_nan_corrected = noise_func(self.distmap)
# noisemap_nan_corrected=np.array(self.noisemap)
# nb_wo_noise_value=0
# for (i,j), value in np.ndenumerate(self.noisemap):
# if np.isnan(value):
# try:
# noisemap_nan_corrected[i,j] = noise_func(self.distmap[i,j])
# except:
# noisemap_nan_corrected[i,j] = np.nan
# nb_wo_noise_value += 1
# if nb_wo_noise_value>0:
# print('Warning: the noise map could not be estimated everywhere, {0:5} pixels have no noise value'.format(nb_wo_noise_value))
return noisemap_nan_corrected
def get_penalty(self,fwhm,sigma=5.,curve1d=True,verbose=False):
"""
Returns an array containing the penalty term to apply to the noise curve
to account for the small number statistics.
Input:
- fwhm: the size of a resolution element in pixel
-sigma: the confidence level expressed in number of sigma for a gaussian
density distribution (by default 5)
-curve1d: if True, it return the penalty term for a 1D contrast curve,
if False it assumes you test each resel independantly and
(for a contrast map for instance) and the penalty term is higher.
"""
# number of resolution elements at each radius r
nbResels = np.array(np.round(2*np.pi*self.r/float(fwhm)),dtype=int)
        # Confidence level corresponding to the given sigma level (gaussian)
confidenceLevel = norm.cdf(sigma)
if verbose:
print('The false alarm probability for {0:f} sigma is {1:6.2e}'.format(sigma,1-confidenceLevel))
if curve1d:
print('You chose a 1D contrast curve')
else:
print('You chose a 2D contrast map')
#ppf is the percent point function (inverse of cdf - percentiles)
if curve1d:
return t.ppf(confidenceLevel, nbResels-1)*np.sqrt(1.+1./nbResels)/sigma
else:
return t.ppf(confidenceLevel, nbResels-2)*np.sqrt(1.+1./(nbResels-1))/sigma
if __name__ == '__main__':
import matplotlib.pyplot as plt
from image_tools import distance_array
sigma = 3
size=10
# fake_img = np.random.normal(np.random.randint(-5,5),np.random.rand(),(size,size))
fake_img = distance_array((size,size))#,centerx=size/2.-0.5,centery=size/2.-0.5)
# fake_img = distance_array((size,size))
plt.figure(0)
plt.imshow(fake_img,origin='lower')
plt.colorbar()
# rd=Radial_data(fake_img,xvect=np.arange(-size/2,size/2.)+0.5,yvect=np.arange(-size/2,size/2.)+0.5)
rd=Radial_data(fake_img)
# example of use
plt.figure(1)
plt.plot(rd.r,rd.mean,'ro',label='Mean')
plt.plot(rd.r,rd.std,'g:',label='Std')
plt.plot([0,size/2.*np.sqrt(2)],[0,size/2.*np.sqrt(2)],'b-',label='y=x')
    plt.xlabel('Separation in px')
    plt.ylabel('Value in ADU')
plt.grid()
plt.legend()
#
print(rd.r)
print(rd.mean)
# example to compute the penalty factor due to small sample statistics
penalty_factor_1d=rd.get_penalty(1,sigma,verbose=True)
penalty_factor_2d=rd.get_penalty(1,sigma,verbose=True,curve1d=False)
# we double check the result here
sep = rd.r #np.arange(1,11)
nbResels=np.round(2*np.pi*sep)
confidenceLevel = norm.cdf(sigma)
penalty_2d=t.ppf(confidenceLevel, nbResels-2)*np.sqrt(1.+1./(nbResels-1))/sigma
penalty_1d=t.ppf(confidenceLevel, nbResels-1)*np.sqrt(1.+1./(nbResels))/sigma
#we plot the comparison
plt.figure(2)
plt.plot(rd.r,penalty_factor_2d,'ro',label='2D from function',fillstyle='none')
plt.plot(rd.r,penalty_factor_1d,'bo',label='1D from function',fillstyle='none')
plt.plot(sep,penalty_2d,'rx',label='2D check')
plt.plot(sep,penalty_1d,'bx',label='1D check')
plt.xlabel('Separation in resolution elements')
plt.ylabel('Penalty term for a {0:d}$\sigma$ threshold'.format(5))
plt.legend(frameon=False)
plt.grid()
```
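Beyond the self-test above, a typical use of `Radial_data` is to turn an image into a noise map with the Mawet et al. (2014) small-sample penalty applied; a minimal sketch (illustrative values, assumed import path):
```python
import numpy as np
from radial_data import Radial_data  # assumed import path

image = np.random.normal(0., 1., (200, 200))   # illustrative data
rd = Radial_data(image)
# 5-sigma noise map including the small-sample penalty for a 4-px resel
noise_map = rd.get_noise_map(fwhm=4., sigma=5.)
```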
#### File: jmilou/image_utilities/sample_ellipse.py
```python
import sys
from sympy import Symbol, nsolve
import math
import numpy as np
import matplotlib.pyplot as plt
#from scipy import ndimage
sys.path.append('/Users/jmilli/Dropbox/lib_py/image_utilities')
import rotation_images as rot
from numpy.linalg import eig, inv
def ellipse_points(a,b,R,precision=0.2*math.pi/180, step_R=0.1,plot=True):
"""
    Function ellipse_points finds equally spaced points on an ellipse
Arguments:
1. a: semi-major axis of the ellipse
2. b: semi-minor axis of the ellipse
3. R: spacing between points
Optional arguments:
4. precision: the precision in radians on spacing
5. step_R: the step in spacing between each iteration
"""
x = Symbol('x')
y = Symbol('y')
ellipse = (x/a)**2 + (y/b)**2 - 1
t_final=math.pi/2
iter_nb=0
continue_loop = True
while continue_loop:
iter_nb += 1
if iter_nb > 1:
print('Iterations: {0:.0f}, deviation at final position {1:4.2f} degrees'.format(iter_nb-1,(t-t_final)*180/math.pi))
t=0 #math.pi/10
x_sol=[a*math.cos(t)]
y_sol=[b*math.sin(t)]
t_sol=[t]
while t < t_final-precision:
x0 = a*math.cos(t)
y0 = b*math.sin(t)
cercle = (x-x0)**2 + (y-y0)**2 -R**2
trynextguess=True
nbguessiter=0
while (trynextguess and nbguessiter < 10):
try:
derivative= [-a*math.sin(t),b*math.cos(t)]
direction = R/np.linalg.norm(derivative)*np.array(derivative)
guess = np.array([x0,y0])+direction
sol=nsolve((ellipse,cercle), (x, y), (guess[0],guess[1]))
trynextguess=False
except ValueError as e:
nbguessiter += 1
print(e)
print('Initial guess changed. We retry: {0:4.0f} iterations'.format(
nbguessiter))
t+=math.atan(R/4/a)
#print(sol)
t = math.acos(float(sol[0])/a)
t_sol.append(t)
x_sol.append(a*math.cos(t))
y_sol.append(b*math.sin(t))
if math.fabs(t-t_final) < precision:
continue_loop = False
else:
R-=step_R
print('Number of iterations: {0:4.0f}'.format(iter_nb))
print('Deviation in degrees at final position = {0:4.2f}'.format(
(t-t_final)*180/math.pi))
print('Spacing between points = {0:4.2f}'.format(R))
if plot:
nb_points = 100
theta = np.arange(0,math.pi/2,math.pi/2/nb_points)
x_ellipse = np.array([a*math.cos(i) for i in theta])
y_ellipse = np.array([b*math.sin(i) for i in theta])
plt.plot(x_sol,y_sol, 'ro')
plt.plot(x_ellipse,y_ellipse)
plt.plot([0,a],[0,0])
plt.plot([0,0],[0,b])
plt.axis([0,a, 0, b])
plt.axis('equal') # ajout
plt.show()
return t_sol
def elliptical_mask(size,a,b,epsilon=2.,delta=2.,yc=None,xc=None,theta=0):
"""
    Function elliptical_mask builds an elliptical mask. Two ellipses of semi-major
    axis a-delta and a+delta and of semi-minor axis b-epsilon and b+epsilon are built.
    The mask is 0 everywhere outside the 2 ellipses and 1 within the 2 ellipses.
    Arguments:
        1. size: the size of the (square) image
        2. a: semi-major axis of the ellipse
        3. b: semi-minor axis of the ellipse
    Optional arguments:
        4. epsilon: 2*epsilon+1 is the difference between the inner and outer ellipse
           along the semi-minor axis. By default it is 2px
        5. delta: 2*delta+1 is the difference between the inner and outer ellipse
           along the semi-major axis. By default it is 2px
        6. yc: the center of the ellipse in y. By default, size/2
        7. xc: the center of the ellipse in x. By default, size/2
        8. theta: the position angle of the semi-major axis of the ellipse, measured
           anti-clockwise from the horizontal
    Output:
        id_inner_ellipse: indices of the pixels nested between the 2 ellipses
"""
x1 = np.arange(0,size)
y1 = np.arange(0,size)
x,y = np.meshgrid(y1,x1)
    if yc is None:
        yc = size/2
    if xc is None:
        xc = size/2
ellipse_ext = (x-xc)**2/(a+delta)**2+(y-yc)**2/(b+epsilon)**2-1
ellipse_int = (x-xc)**2/(a-delta)**2+(y-yc)**2/(b-epsilon)**2-1
if theta != 0:
ellipse_ext = rot.frame_rotate(ellipse_ext,-theta)
ellipse_int = rot.frame_rotate(ellipse_int,-theta)
id_inner_ellipse = np.where((ellipse_ext < 0) * (ellipse_int > 0))
return id_inner_ellipse
def elliptical_mask_advanced(size,a1,b1,a2,b2,xc1=None,yc1=None,yc2=None,
xc2=None,theta1=0,theta2=0):
"""
Function ellitical_mask builds an elliptical mask. Two ellipses of semi major
axis a1 and a2 and of semi-minor axis b1 and b2 are built.
The mask is 0 everywhere outside the 2 ellipses and 1 within the 2 ellipses.
Arguments:
1. size: the size of the image
2. a1: semi-major axis of the inner ellipse
3. b1: semi-minor axis of the inner ellipse
4. a2: semi-major axis of the outer ellipse
5. b2: semi-minor axis of the outer ellipse
Optional arguments:
6.yc1: the x center of the ellipse in y. By default, size/2
7.xc1: the y center of the ellipse in x. By default, size/2
8. theta1: the position angle of the semi-major axis of the inner ellipse, measured
anti-clockwise from the horizontal
Output
id_inner: indices of the pixels nested within the 2 ellipse
"""
x1 = np.arange(0,size)
y1 = np.arange(0,size)
x,y = np.meshgrid(y1,x1)
    if yc1 is None:
        yc1 = size/2
    if xc1 is None:
        xc1 = size/2
    if yc2 is None:
        yc2 = size/2
    if xc2 is None:
        xc2 = size/2
ellipse_int = (x-xc1)**2/a1**2+(y-yc1)**2/b1**2-1
ellipse_ext = (x-xc2)**2/a2**2+(y-yc2)**2/b2**2-1
if theta1 != 0:
ellipse_int = rot.frame_rotate(ellipse_int,-theta1)
if theta2 != 0:
ellipse_ext = rot.frame_rotate(ellipse_ext,-theta2)
id_inner_ellipse = np.where((ellipse_ext < 0) * (ellipse_int > 0))
id_outer_ellipse = np.where((ellipse_ext > 0) + (ellipse_int < 0))
return id_inner_ellipse,id_outer_ellipse
def ellipse_polynomial_coeff(a,b,x0,y0,pa):
"""
    This function returns the polynomial coefficients of an ellipse which is
parametrized through a semi-major axis a, a semi-minor axis b, an offset
(x0,y0) and a position angle pa measured from North counter-clockwise. The
output is an array called coeff such that the ellipse equation is
coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5] = 0
with coeff[5]=1
"""
trigo_pa=-pa-math.pi/2
cosa=np.cos(trigo_pa)
sina=np.sin(trigo_pa)
coeff=np.zeros(6)
coeff[0]=a**2*cosa**2+b**2*sina**2
coeff[1]=2*cosa*sina*(b**2-a**2)
coeff[2]=a**2*sina**2+b**2*cosa**2
coeff[3]=a**2*(-2*cosa**2*x0+2*cosa*sina*y0)+b**2*(-2*cosa*sina*y0 - 2*sina**2*x0)
coeff[4]=a**2*(2*cosa*sina*x0 - 2*sina**2*y0)+b**2*(- 2*cosa**2*y0 - 2*cosa*sina*x0)
coeff[5]=-a**2*b**2+a**2*(cosa**2*x0**2 - 2*cosa*sina*x0*y0 + sina**2*y0**2)+b**2*(cosa**2*y0**2+sina**2*x0**2+ 2*cosa*sina*x0*y0)
return coeff/coeff[5]
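# Illustrative sanity check (an editorial addition, not part of the original
# module): for a circle (a=b=r, no offset, pa=0) the normalized coefficients
# must satisfy the conic equation at any point of the circle.
def _check_ellipse_polynomial_coeff(r=3.):
    coeff = ellipse_polynomial_coeff(r, r, 0., 0., 0.)
    x, y = r*math.cos(0.3), r*math.sin(0.3)  # a point on the circle
    residual = (coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 +
                coeff[3]*x + coeff[4]*y + coeff[5])
    assert abs(residual) < 1e-8, 'a point on the circle must satisfy the equation'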
###############################################################################
###############################################################################
## Algebraic solution for an ellipse fitting
## from http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
###############################################################################
###############################################################################
def fitEllipse(x,y):
"""
    This function fits the conic
    a[0]*x**2 + a[1]*x*y + a[2]*y**2 + a[3]*x + a[4]*y + a[5] = 0
    to a set of points (x,y) by least squares, under the ellipse constraint
    4*a[0]*a[2] - a[1]**2 = 1, and returns the coefficients.
"""
x = x[:,np.newaxis]
y = y[:,np.newaxis]
D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
S = np.dot(D.T,D)
C = np.zeros([6,6])
C[0,2] = C[2,0] = 2; C[1,1] = -1
E, V = eig(np.dot(inv(S), C))
n = np.argmax(np.abs(E))
a = V[:,n]
return a
def ellipse_center(coeff):
"""
This function converts a set of 6 polynomial coefficients defined as
coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5]
to the ellipse parameters in a new frame aligned with the axis of the ellipse. It returns
the offset of the ellipse center in this new frame.
Adapted from http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
"""
a,b,c,d,f,g = coeff[0], coeff[1]/2., coeff[2], coeff[3]/2., coeff[4]/2., coeff[5]
delta = b*b-a*c
    if delta == 0:
        print('Warning: the ellipse is degenerate: delta=0 (single point)')
x0=(c*d-b*f)/delta
y0=(a*f-b*d)/delta
return np.array([x0,y0])
def ellipse_angle_of_rotation(coeff):
"""
This function converts a set of 6 polynomial coefficients defined as
coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5]
to the ellipse parameters in a new frame aligned with the axis of the ellipse. It returns
the position angle of the ellipse.
Adapted from http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
"""
a,b,c,d,f,g = coeff[0] , coeff[1]/2, coeff[2], coeff[3]/2, coeff[4]/2, coeff[5]
if (a == c):
print('Warning: the ellipse is degenerate to a circle, position angle set to 0 by default')
return 0
return 0.5*np.arctan(2*b/(a-c))
#def ellipse_axis_length( a ):
# b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
# up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
# down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
# down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
# res1=np.sqrt(up/down1)
# res2=np.sqrt(up/down2)
# return np.array([res1, res2])
def ellipse_axis_length(coeff):
"""
This function converts a set of 6 polynomial coefficients defined as
coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5]
to the ellipse parameters in a new frame aligned with the axis of the ellipse. It returns
the semi-major and semi-minor axis of the ellipse.
Adapted from http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
"""
a,b,c,d,f,g = coeff[0] , coeff[1]/2, coeff[2], coeff[3]/2, coeff[4]/2, coeff[5]
up = 2*(a*f**2+c*d**2+g*b**2-2*b*d*f-a*c*g)
# print((a-c)*(a-c))
down1=(b**2-a*c)*( np.sqrt((a-c)**2+4*b**2)-(a+c))
down2=(b**2-a*c)*(-np.sqrt((a-c)**2+4*b**2)-(a+c))
# print(down1,down2)
res1=np.sqrt(up/down1)
res2=np.sqrt(up/down2)
return np.array([res1, res2])
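# Illustrative round-trip check (an editorial addition, not from the original
# module): sample points on a known ellipse, fit the conic with fitEllipse,
# then recover the center and axis lengths with the helpers above.
def _check_fit_roundtrip():
    phi = np.linspace(0, 2*np.pi, 50)
    x = 1.0 + 5.0*np.cos(phi)
    y = -2.0 + 3.0*np.sin(phi)
    coeff = fitEllipse(x, y)
    x0, y0 = ellipse_center(coeff)
    axes = ellipse_axis_length(coeff)
    assert np.allclose([x0, y0], [1.0, -2.0], atol=1e-4)
    assert np.allclose(sorted(axes), [3.0, 5.0], atol=1e-4)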
###############################################################################
###############################################################################
## Least square fit
###############################################################################
###############################################################################
def chi2(param_model, theta, rho, rho_error):
"""
    This function defines a chi squared between measurements given as (theta,rho)
    and an ellipse parametrized in the sky plane by param_model=x0, y0, a, b, alpha.
    The error of each point is defined as the difference between its distance to the
    ellipse center and the distance from the center to the ellipse at the same angle.
"""
x0, y0, a, b, alpha = param_model
x = rho*np.cos(theta)
y = rho*np.sin(theta)
distance_data_to_ell_center = np.sqrt((x-x0)**2+(y-y0)**2)
p=(y0-y)/(x0-x)
phi = np.arctan(a/b*(p*np.cos(alpha)-np.sin(alpha))/(p*np.sin(alpha)+np.cos(alpha)))
distance_ell_to_ell_center = np.sqrt( a**2*np.cos(phi)**2+b**2*np.sin(phi)**2)
sigma2 = rho_error**2
return np.sum((distance_data_to_ell_center-distance_ell_to_ell_center)**2/sigma2)
def chi2_from_deprojected_ellipse(orbital_param_model, theta, rho, rho_error):
"""
    This function defines a chi squared between measurements given as (theta,rho)
    and an ellipse parametrized in the orbital plane by (a,e,itilt,omega,Omega).
    The angles must be expressed in radians.
    The error of each point is defined as in chi2, from the projected ellipse.
"""
# a,e,itilt,omega,Omega=orbital_param_model
a,b,x0,y0,alpha=projected_param_from_ellipse_param(*orbital_param_model[0:6],verbose=False)
skyplane_param_model=x0,y0,a,b,alpha
return chi2(skyplane_param_model, theta, rho, rho_error)
###############################################################################
###############################################################################
## Deprojection of the ellipse
###############################################################################
###############################################################################
def deprojection_from_poly_coeff(coeff,verbose=True):
"""
    This function takes as input the ellipse polynomial coefficients coeff such that
    coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5] = 0
    and returns the deprojected parameters of the ellipse:
        a = semi-major axis
        e = eccentricity
        omega = argument of pericenter
        Omega = longitude of ascending node
        itilt = inclination
"""
# This nomenclature is from Smart 1930
A=coeff[0]/coeff[5]
H=coeff[1]/2./coeff[5]
B=coeff[2]/coeff[5]
G=coeff[3]/2./coeff[5]
F=coeff[4]/2./coeff[5]
tan2Omega=(2*(H-F*G))/(F**2-G**2+A-B)
# print(' tan(2Omega)={0:5.2f}'.format(tan2Omega))
Omega=(np.arctan(tan2Omega))/2
tan2ioverp2=2*(H-F*G)/np.sin(2*Omega)
if tan2ioverp2 < 0:
Omega=(np.arctan(tan2Omega)+math.pi)/2
tan2ioverp2=2*(H-F*G)/np.sin(2*Omega)
if verbose:
print('Warning: increase Omega by pi/2 to avoid inconsistency')
p=np.sqrt(2/(F**2+G**2-A-B-tan2ioverp2))
itilt=np.arctan(p*np.sqrt(tan2ioverp2))
denom_tanomega=G*np.cos(Omega)+F*np.sin(Omega)
# print(' denom tan(omega)={0:5.2f}'.format(denom_tanomega))
if denom_tanomega != 0:
omega=np.arctan((F*np.cos(Omega)-G*np.sin(Omega))*np.cos(itilt)/(G*np.cos(Omega)+F*np.sin(Omega)))
else:
omega=0
e=-p/np.cos(omega)*(G*np.cos(Omega)+F*np.sin(Omega))
true_a=p/(1-e**2)
if verbose:
a,b=ellipse_axis_length(coeff)
itilt_before=np.arccos(np.min([a,b])/np.max([a,b]))
pa=ellipse_angle_of_rotation(coeff)
x0,y0=ellipse_center(coeff)
offset_distance=np.sqrt(x0**2+y0**2)
omega_before=np.arctan(y0/x0) #+270
e_before=offset_distance/(b)
print('Parameters of the ellipse before deprojection')
print(' a={0:5.2f}'.format(np.max([a,b])))
print(' e={0:5.3f}'.format(e_before))
print(' offset={0:5.2f}'.format(offset_distance))
print(' direction of offset={0:5.2f} deg (from W ccw)'.format(np.rad2deg(omega_before)))
print(' Omega={0:5.2f} deg'.format(np.rad2deg(pa)))
print(' i={0:5.2f} deg'.format(np.rad2deg(itilt_before)))
print('Parameters of the ellipse after deprojection')
print(' a={0:5.2f}'.format(true_a))
print(' e={0:5.3f}'.format(e))
print(' p={0:5.3f}'.format(p))
print(' omega={0:5.2f} deg'.format(np.rad2deg(omega)))
print(' Omega={0:5.2f} deg'.format(np.rad2deg(Omega)))
print(' i={0:5.2f} deg'.format(np.rad2deg(itilt)))
return [true_a, e, omega, Omega,itilt]
def deprojection_from_ellipse_param(a,b,x0,y0,pa,verbose=True):
"""
    This function takes as input the ellipse parameters
    a,b,x0,y0,pa (pa in radians) and
    returns the deprojected parameters of the ellipse:
a = semi-major axis
e = eccentricity
omega = argument of pericenter in radian
Omega = longitude of ascending node in radian
i = inclination in radian
"""
coeff=ellipse_polynomial_coeff(a,b,x0,y0,pa)
print(coeff)
return deprojection_from_poly_coeff(coeff,verbose=verbose)
#coeff = projected_coeff_from_ellipse_param(a,e,i,omega,Omega)
def projected_coeff_from_ellipse_param(a,e,i,omega,Omega):
"""
    This function takes as input the true orbital parameters of an ellipse (a,e,i,
    omega,Omega), the angles being in radians,
    and projects them onto the plane of the sky. It returns the polynomial
    coefficients of the ellipse in the plane of the sky (notation from Smart 1930),
    defined as
    coeff[0]*x**2 + coeff[1]*x*y + coeff[2]*y**2 + coeff[3]*x + coeff[4]*y + coeff[5] = 0
"""
n3 = np.cos(i)
cosomega=np.cos(omega)
cosOmega=np.cos(Omega)
sinomega=np.sin(omega)
sinOmega=np.sin(Omega)
l1 = cosOmega*cosomega-sinOmega*sinomega*n3
m1 = sinOmega*cosomega+cosOmega*sinomega*n3
l2 =-cosOmega*sinomega-sinOmega*cosomega*n3
m2 =-sinOmega*sinomega+cosOmega*cosomega*n3
b=a*np.sqrt(1-e**2)
f = 1./(e**2-1)
A = f/n3**2*(m2**2/a**2+m1**2/b**2)
B = f/n3**2*(l2**2/a**2+l1**2/b**2)
H =-f/n3**2*(l2*m2/a**2+l1*m1/b**2)
G = f*e*m2 / (a*n3)
F =-f*e*l2 / (a*n3)
coeff = [A, 2*H, B, 2*G, 2*F, 1.]
return coeff
def projected_param_from_ellipse_param(a,e,i,omega,Omega,verbose=True):
coeff = projected_coeff_from_ellipse_param(a,e,i,omega,Omega)
if verbose:
print(coeff)
x0,y0 = ellipse_center(coeff)
alpha = ellipse_angle_of_rotation(coeff)
a,b = ellipse_axis_length(coeff)
return a,b,x0,y0,alpha
# return deprojection_from_poly_coeff(coeff,verbose=verbose)
def plot_ellipse(a,b,x0,y0,pa,verbose=True):
R = np.arange(0,2*np.pi, 0.01)
x = x0 + a*np.cos(R)*np.cos(pa) - b*np.sin(R)*np.sin(pa)
y = y0 + a*np.cos(R)*np.sin(pa) + b*np.sin(R)*np.cos(pa)
if verbose:
print('x0={0:5.2f} , y0={1:5.2f}'.format(x0,y0))
print('a={0:5.2f} , b={1:5.2f}'.format(a,b))
print('position angle={0:5.2f}'.format(np.rad2deg(pa)))
plt.plot(x,y,'r-')
plt.plot([x0],[y0],'ro')
plt.grid()
if __name__=='__main__':
res = ellipse_points(100.,50.,4.,precision=0.2*math.pi/180, step_R=0.1,plot=True)
``` |
{
"source": "jmilou/pyZELDA",
"score": 4
} |
#### File: pyzelda/utils/circle_fit.py
```python
import numpy as np
from scipy import optimize
from matplotlib import pyplot as plt, cm, colors
def distance_from_center(c, x, y):
'''
Distance of each 2D points from the center (xc, yc)
Parameters
----------
c : array_like
Coordinates of the center
x,y : array_like
Arrays with the x,y coordinates
'''
xc = c[0]
yc = c[1]
Ri = np.sqrt((x-xc)**2 + (y-yc)**2)
return Ri - Ri.mean()
def least_square_circle(x, y):
'''
Least-square determination of the center of a circle
Parameters
----------
x,y : array_like
Arrays with the x,y coordinates of the points on/inside the circle
'''
# coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
center_estimate = x_m, y_m
center, ier = optimize.leastsq(distance_from_center, center_estimate, args=(x, y))
# results
xc, yc = center
Ri = np.sqrt((x-xc)**2 + (y-yc)**2)
R = Ri.mean()
residu = np.sum((Ri - R)**2)
return xc, yc, R, residu
def plot_data_circle(x, y, xc, yc, R):
f = plt.figure(0)
plt.axis('equal')
theta_fit = np.linspace(-np.pi, np.pi, 180)
x_fit = xc + R*np.cos(theta_fit)
y_fit = yc + R*np.sin(theta_fit)
plt.plot(x_fit, y_fit, 'b-', label="fitted circle", lw=2)
plt.plot([xc], [yc], 'bD', mec='y', mew=1)
plt.xlabel('x')
plt.ylabel('y')
# plot data
plt.plot(x, y, 'r-.', label='data', mew=1)
plt.legend(loc='best', labelspacing=0.1)
plt.grid()
plt.title('Least Squares Circle')
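# Minimal usage sketch (an editorial addition, not part of the original module):
# fit a circle to noisy points drawn around an assumed center and radius.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    angles = np.linspace(0, 2*np.pi, 100)
    x = 2.0 + 5.0*np.cos(angles) + 0.05*rng.standard_normal(angles.size)
    y = -1.0 + 5.0*np.sin(angles) + 0.05*rng.standard_normal(angles.size)
    xc, yc, R, residu = least_square_circle(x, y)
    print('center=({0:.2f},{1:.2f}), R={2:.2f}, residual={3:.3f}'.format(xc, yc, R, residu))
    plot_data_circle(x, y, xc, yc, R)
    plt.show()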
``` |
{
"source": "jmimassi/PI2CChampionshipRunner",
"score": 3
} |
#### File: jmimassi/PI2CChampionshipRunner/chat.py
```python
from datastore import Datastore
from immutable import List, Map, append, remove
getChats, updateChats, subscribe = Datastore(List())
def postChat(name, message):
updateChats(append(Map({
"name": name,
"message": message
})))
if name == 'Admin':
print(message)
if len(getChats()) > 20:
updateChats(remove(0))
```
#### File: jmimassi/PI2CChampionshipRunner/clients.py
```python
from threading import Lock
import copy
import json
__clients = {}
__match = []
__matchLock = Lock()
def add(address, name, matricules):
'''
    Add a client and schedule its matches
'''
with __matchLock:
if address not in __clients:
for opponent in __clients:
__match.append([address, opponent])
__match.append([opponent, address])
__clients[address] = {
'address': address,
'points': 0,
'badMoves': 0,
'matchCount': 0,
'matricules': matricules
}
__clients[address]['name'] = name
__clients[address]['status'] = 'online'
def getMatch():
'''
    Get the first match from the queue
'''
players = None
with __matchLock:
if len(__match) > 0:
players = __match[0]
__match[0:1] = []
return players
def get(address):
'''
Get a client
'''
return copy.deepcopy(__clients[address])
def getAll():
'''
Get All Clients
'''
return copy.deepcopy(list(__clients.values()))
def clear():
'''
Clear All Clients and Matches
'''
with __matchLock:
__clients.clear()
__match.clear()
def matchWin(players, winner):
'''
Update clients
'''
for player in players:
__clients[player]['matchCount'] += 1
__clients[players[winner]]['points'] += 3
def matchDraw(players):
'''
Update clients
'''
for player in players:
__clients[player]['points'] += 1
__clients[player]['matchCount'] += 1
def addBadMoves(player, count):
'''
Update clients
'''
__clients[player]['badMoves'] += count
def changeStatus(player, status):
'''
Update client status
'''
__clients[player]['status'] = status
def save():
'''
Save clients in data.json
'''
with open('data.json', 'w', encoding='utf8') as file:
json.dump(list(__clients.values()), file, indent='\t')
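# Minimal usage sketch (an editorial addition, not part of the original module),
# showing the intended registration/match flow; the addresses are placeholders.
if __name__ == '__main__':
    add(('127.0.0.1', 3000), 'Alice', ['12345'])
    add(('127.0.0.1', 3001), 'Bob', ['67890'])
    players = getMatch()   # first queued pairing of the two clients
    matchWin(players, 0)   # first player of the pair earns 3 points
    print([(c['name'], c['points']) for c in getAll()])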
```
#### File: games/tictactoe/render.py
```python
from PIL import Image, ImageDraw
import os
root = os.path.dirname(__file__)
cross = Image.open(os.path.join(root, 'cross.png'))
circle = Image.open(os.path.join(root, 'circle.png'))
WIDTH = 600
HEIGHT = 600
SIZE = (WIDTH, HEIGHT)
LINEWIDTH = 5
def render(state):
res = Image.new('RGBA', SIZE, (50, 50, 50))
draw = ImageDraw.Draw(res)
draw.line([(WIDTH//3, 0), (WIDTH//3, HEIGHT)], (200, 200, 200), LINEWIDTH)
draw.line([(2*WIDTH//3, 0), (2*WIDTH//3, HEIGHT)], (200, 200, 200), LINEWIDTH)
draw.line([(0, HEIGHT//3), (WIDTH, HEIGHT//3)], (200, 200, 200), LINEWIDTH)
draw.line([(0, 2*HEIGHT//3), (WIDTH, 2*HEIGHT//3)], (200, 200, 200), LINEWIDTH)
if state is None:
return res
startx = WIDTH//6 - cross.size[0]//2
starty = HEIGHT//6 - cross.size[1]//2
stepx = WIDTH//3
stepy = HEIGHT//3
for i, elem in enumerate(state['board']):
x = i%3
y = i//3
if elem == 'X':
res.paste(cross, (startx+x*stepx, starty+y*stepy), cross)
elif elem == 'O':
res.paste(circle, (startx+x*stepx, starty+y*stepy), circle)
return res
if __name__=='__main__':
image = render(None)
image.save('image.png')
``` |
{
"source": "jminar/chinook",
"score": 2
} |
#### File: chinook/chinook/atomic_mass.py
```python
#@author: ryanday
#MIT License
#Copyright (c) 2018 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import linecache
import pkg_resources
a_file = 'atomic_mass.txt'
filename = pkg_resources.resource_filename(__name__,a_file)
def get_mass_from_number(N_at):
'''
Pull atomic mass for the indicated atomic number
*args*:
- **N_at**: int, atomic number
*return*:
- float, atomic mass, in atomic mass units
***
'''
try:
return float(linecache.getline(filename,int(N_at)).split('\t')[2][:-1])
except IndexError:
print('ERROR: Invalid atomic number, returning mass = 0.')
return 0.0
def get_el_from_number(N_at):
'''
Get symbol for element, given the atomic number
*args*:
- **N_at**: int, atomic number
*return*:
- string, symbol for element
***
'''
try:
return linecache.getline(filename,int(N_at)).split('\t')[1]
except IndexError:
print('ERROR: Invalid atomic number, returning empty string.')
return ''
def get_num_from_el(el):
'''
Get atomic number from the symbol for the associated element. Returns 0 for
invalid entry.
*args*:
- **el**: string, symbol for element
*return*:
- **Z**: int, atomic number.
'''
Z = -1
with open(filename,'r') as mass:
for l in mass:
line = l.split('\t')
if line[1]==el:
Z = int(line[0])
mass.close()
if Z == -1:
print('WARNING!! Invalid symbol passed. Returning with Z = 0')
Z = 0
return Z
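# Illustrative round trip (an editorial addition, not part of the original
# module): look up an element's atomic number, then read back symbol and mass.
if __name__ == '__main__':
    Z = get_num_from_el('Fe')   # expected: 26
    print(Z, get_el_from_number(Z), get_mass_from_number(Z))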
```
#### File: chinook/chinook/build_lib.py
```python
import numpy as np
import sys
if sys.version_info<(3,0):
    raise RuntimeError('This software requires Python 3.0 or higher. Please update your Python installation before proceeding.')
else:
import chinook.orbital as olib
import chinook.TB_lib as TBlib
import chinook.slab as slib
import chinook.klib as klib
###Build Basis
def gen_basis(basis):
'''
Generate a list of orbital objects as the input basis for a tight-binding model.
User passes a basis dictionary, function returns a modified version of this
same dictionary, with the list of orbitals now appended as the *'bulk'* entry
*args*:
- **basis**--dictionary with keys:
- *'atoms'*: list of integer, indices for distinct atoms,
- *'Z'*: dictionary of integer: *'atom'*:element (integer) pairs
- *'orbs'*: list of lists of string, for each atom containing the
        orbital labels (usually in conventional nlxx format),
- *'pos'*: list of numpy arrays of length 3 float indicating
positions of the atoms in direct Angstrom units,
- optional keys:
- *'orient'*: list, one entry for each atom, indicating a
local rotation of the indicated atom, various formats accepted;
for more details, c.f. **chinook.orbital.py**
- *'spin'*: dictionary of spin information:
- *'bool'*: boolean, double basis into spinor basis,
- *'soc'*: boolean, include spin-orbit coupling
- *'lam'*: dictionary of SOC constants, integer:float
pairs for atoms in *'atoms'* list, and lambda_SOC in eV
*return*:
- **basis** dictionary, modified to include the **bulk** list of orbital
objects
***
'''
bulk_basis = []
required = ['atoms','orbs','pos','Z']
all_present = recur_product([ri in basis.keys() for ri in required])
do_orient = 'orient' in basis.keys()
if do_orient:
for a in range(len(basis['atoms'])):
if len(basis['orient'][a])==1:
basis['orient'][a] = [basis['orient'][a] for i in range(len(basis['orbs'][a]))]
elif len(basis['orient'][a])<len(basis['orbs'][a]):
raise ValueError ('ORIENT ERROR: pass either 1 orientation per orbital for a given atom, or a single orientation for all orbitals on atom')
return None
if not all_present:
raise ValueError ('BASIS GENERATION ERROR!!!! Ensure atoms, atomic numbers, orbitals, positions are all passed to gen_basis in the basis dictionary. See gen_basis.__doc__ for details.')
return None
else:
for a in list(enumerate(basis['atoms'])):
for o in list(enumerate(basis['orbs'][a[0]])):
if do_orient:
bulk_basis.append(olib.orbital(a[1],len(bulk_basis),o[1],basis['pos'][a[0]],basis['Z'][a[1]],orient=basis['orient'][a[0]][o[0]]))
else:
bulk_basis.append(olib.orbital(a[1],len(bulk_basis),o[1],basis['pos'][a[0]],basis['Z'][a[1]]))
if 'spin' in basis.keys():
if basis['spin']['bool']:
bulk_basis = olib.spin_double(bulk_basis,basis['spin']['lam'])
basis['bulk'] = bulk_basis
return basis
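# Illustrative sketch (an editorial addition, not from the original source) of
# the dictionary expected by gen_basis: a hypothetical single-site model with
# one 2s and one 2pz orbital; the atomic number and position are arbitrary.
# example_basis = {'atoms': [0],
#                  'Z': {0: 6},
#                  'orbs': [['20', '21z']],
#                  'pos': [np.array([0.0, 0.0, 0.0])]}
# example_basis = gen_basis(example_basis)  # appends the 'bulk' orbital list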
def gen_K(Kdic):
'''
Generate k-path for TB model to be diagonalized along.
*args*:
- **Kdic**: dictionary for generation of kpath with:
- *'type'*: string 'A' (absolute) or 'F' (fractional) units
- *'avec'*: numpy array of 3x3 float lattice vectors
- *'pts'*: list of len3 array indicating the high-symmetry points
along the path of interest
- *'grain'*: int, number of points between *each* element of *'pts'*
optional:
- *'labels'*:list of strings with same length as *'pts'*, giving
plotting labels for the kpath
*return*:
**Kobj**: K-object including necessary attributes to be read by the **TB_model**
'''
if 'labels' not in Kdic.keys():
Kdic['labels'] = ['K{:d}'.format(i) for i in range(len(Kdic['pts']))]
required = ['type','pts','grain']
if not recur_product([ri in Kdic.keys() for ri in required]):
raise KeyError('Invalid K-dictionary format. See documentation for gen_K to ensure all required arguments are passed in k-dictionary')
return None
if Kdic['type'] == 'F' and 'avec' not in Kdic.keys():
raise KeyError('Invalid K-dictionary format. Must pass lattice vectors for fractional coordinates')
return None
else:
if Kdic['type']=='F':
B = klib.bvectors(Kdic['avec'])
klist = [np.dot(k,B) for k in Kdic['pts']]
elif Kdic['type']=='A':
klist = [k for k in Kdic['pts']]
else:
klist = []
print('You have not entered a valid K path. Proceed with caution.')
Kobj = klib.kpath(klist,Kdic['grain'],Kdic['labels'])
return Kobj
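# Illustrative sketch (an editorial addition, not from the original source) of
# a k-path dictionary in fractional ('F') units for an assumed cubic lattice.
# example_K = {'type': 'F',
#              'avec': np.identity(3)*4.0,
#              'pts': [np.array([0.0, 0.0, 0.0]), np.array([0.5, 0.0, 0.0])],
#              'grain': 100,
#              'labels': ['$\\Gamma$', 'X']}
# Kobj = gen_K(example_K)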
###Built Tight Binding Model
def gen_TB(basis_dict,hamiltonian_dict,Kobj=None,slab_dict=None):
'''
Build a Tight-Binding Model using the user-input dictionaries
*args*:
- **basis_dict**: dictionary, including the *'bulk'* key value pair
generated by **gen_basis**
- **hamiltonian_dict**: dictionary,
- *'spin'*: same dictionary as passed to **gen_basis**
- *'type'*: string, Hamiltonian type--'list' (list of matrix elements),
'SK' (Slater-Koster dictionaries, requires also a 'V' and 'avec' entry),
'txt' (textfile, requires a 'filename' key as well)
- *'cutoff'*: float, cutoff hopping distance
- *'renorm'*: optional float, renormalization factor default to 1.0
- *'offset'*: optional float, offset of chemical potential, default to 0.0
- *'tol'*: optional float, minimum matrix element tolerance, default to 1e-15
- **Kobj**: optional, standard K-object, as generated by **gen_K**
- **slab_dict**: dictionary for slab generation
- *'avec'*: numpy array of 3x3 float, lattice vectors
- *'miller'*: numpy array of 3 integers, indicating the Miller
index of the surface normal in units of lattice vectors
- *'fine'*: fine adjustment of the slab thickness, tuple of two
numeric to get desired termination correct (for e.g. inversion symmetry)
        - *'thick'*: integer, approximate number of unit cells in the
        slab (will not be exact, depending on the fine adjustment and termination)
- *'vac'*: int size of the vacuum buffer -- must be larger than
the largest hopping length to ensure no coupling of slabs
- *'termination'*: tuple of 2 integers: atom indices which
terminate the top and bottom of the slab
*return*:
**TB_model**: tight-binding object, as defined in **chinook.TB_lib.py**
'''
if 'spin' not in hamiltonian_dict.keys():
# if omitted, assume no spin-degree of freedom desired
print('No spin-information entered, assuming no spin-degree of freedom in the following. See build_lib.py for details if spin is desired.')
hamiltonian_dict['spin']={'bool':False}
required = ['type','cutoff']
if not recur_product([ri in hamiltonian_dict.keys() for ri in required]):
raise ValueError ('Ensure all requisite arguments passed in the Hamiltonian dictionary. see gen_TB documentation for details.')
return None
else:
if 'renorm' not in hamiltonian_dict.keys():
hamiltonian_dict['renorm'] = 1.0
if 'offset' not in hamiltonian_dict.keys():
hamiltonian_dict['offset'] = 0.0
if 'tol' not in hamiltonian_dict.keys():
hamiltonian_dict['tol'] = 1e-15
if hamiltonian_dict['type']=='SK' and ('V' not in hamiltonian_dict.keys() or 'avec' not in hamiltonian_dict.keys()):
raise ValueError ('PLEASE INCLUDE THE DICTIONARY OF Slater-Koster elements as "V" in the Hamiltonian dictionary, and lattice vectors "avec" as numpy array of 3x3 float.')
return None
elif hamiltonian_dict['type']=='txt' and 'filename' not in hamiltonian_dict.keys():
raise ValueError ('No "filename" included in Hamiltonian dictionary keys for text-based Hamiltonian entry.')
return None
elif hamiltonian_dict['type']=='list' and 'list' not in hamiltonian_dict.keys():
raise KeyError ('No "list" included in Hamiltonian dictionary keys for list-based Hamiltonian entry.')
return None
else:
if type(slab_dict)==dict:
if hamiltonian_dict['spin']['bool']:
basis_dict['bulk'] = basis_dict['bulk'][:int(len(basis_dict['bulk'])/2)]
Hspin = True
hamiltonian_dict['spin']['bool'] = False #temporarily forestall incorporation of spin
else:
Hspin=False
TB = TBlib.TB_model(basis_dict['bulk'],hamiltonian_dict,Kobj)
if type(slab_dict)==dict:
slab_dict['TB'] = TB
print('running bulk_to_slab now')
TB,slab_H,Rmat = slib.bulk_to_slab(slab_dict)
if Hspin:
TB.basis = olib.spin_double(list(TB.basis),basis_dict['spin']['lam'])
hamiltonian_dict['type']='list'
hamiltonian_dict['list'] = slab_H
hamiltonian_dict['avec'] = TB.avec
hamiltonian_dict['spin']['bool']=Hspin
TB.mat_els = TB.build_ham(hamiltonian_dict)
return TB
def recur_product(elements):
'''
Utility function: Recursive evaluation of the product of all elements in a list
*args*:
- **elements**: list of numeric type
*return*:
- product of all elements of **elements**
***
'''
if len(elements)==1:
return elements[0]
else:
return elements[0]*recur_product(elements[1:])
```
#### File: chinook/chinook/H_library.py
```python
import numpy as np
import chinook.SlaterKoster as SK
import chinook.rotation_lib as rot_lib
import chinook.Ylm as Ylm
hb = 6.626*10**-34/(2*np.pi)
c = 3.0*10**8
q = 1.602*10**-19
A = 10.0**-10
me = 9.11*10**-31
mn = 1.67*10**-27
kb = 1.38*10**-23
def txt_build(filename,cutoff,renorm,offset,tol):
'''
Build Hamiltonian from textfile, input is of form
o1,o2,x12,y12,z12,t12, output in form [o1,o2,x12,y12,z12,t12].
To be explicit, each row of the textfile is used to generate a
k-space Hamiltonian matrix element of the form:
.. math::
H_{1,2}(k) = t_{1,2} e^{i (k_x x_{1,2} + k_y y_{1,2} + k_z z_{1,2})}
*args*:
- **filename**: string, name of file
- **cutoff**: float, maximum distance of hopping allowed, Angstrom
- **renorm**: float, renormalization of the bandstructure
- **offset**: float, energy offset of chemical potential, electron volts
- **tol**: float, minimum Hamiltonian matrix element amplitude
*return*:
- **Hlist**: the list of Hamiltonian matrix elements
***
'''
Hlist = []
with open(filename,'r') as origin:
for line in origin:
spl = line.split(',')
R = np.array([float(spl[2]),float(spl[3]),float(spl[4])])
Hval = complex(spl[5])
if len(spl)>6:
Hval+=1.0j*float(spl[6])
if abs(Hval)>tol and np.linalg.norm(R)<cutoff:
Hval*=renorm
if np.linalg.norm(R)==0.0:
Hval-=offset
tmp = [int(spl[0]),int(spl[1]),R[0],R[1],R[2],Hval]
Hlist.append(tmp)
origin.close()
return Hlist
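# Example of the expected textfile row format for txt_build (an editorial
# addition, with purely illustrative values): o1,o2,x12,y12,z12,Re(t12)[,Im(t12)]
# 0,1,1.42,0.0,0.0,-2.7
# 0,0,0.0,0.0,0.0,0.5,0.0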
def sk_build(avec,basis,Vdict,cutoff,tol,renorm,offset):
'''
Build SK model from using D-matrices, rather than a list of SK terms from table.
This can handle orbitals of arbitrary orbital angular momentum in principal,
but right now implemented for up to and including f-electrons.
NOTE: f-hoppings require thorough testing
*args*:
- **avec**: numpy array 3x3 float, lattice vectors
- **basis**: list of orbital objects
- **Vdict**: dictionary, or list of dictionaries, of Slater-Koster integrals/ on-site energies
- **cutoff**: float or list of float, indicating range where Vdict is applicable
- **tol**: float, threshold value below which hoppings are neglected
- **offset**: float, offset for Fermi level
*return*:
- **H_raw**: list of Hamiltonian matrix elements, in form [o1,o2,x12,y12,z12,t12]
***
'''
Vdict,cutoff,pts = cluster_init(Vdict,cutoff,avec) #build region of lattice points, containing at least the cutoff distance
V = Vdict[0]
if basis[0].spin!=basis[-1].spin: #only calculate explicitly for a single spin species
brange = int(len(basis)/2)
else:
brange = len(basis)
SK_matrices = SK.SK_full(basis[:brange]) #generate the generalized Slater-Koster matrices, as functions of R and potential V
index_orbitals = index_ordering(basis[:brange]) #define the indices associated with the various orbital shells in the basis,
H_raw = on_site(basis[:brange],V,offset) #fill in the on-site energies
for i1 in index_orbitals:
for i2 in index_orbitals:
if index_orbitals[i1][index_orbitals[i1]>-1].min()<=index_orbitals[i2][index_orbitals[i2]>-1].min():
o1o2 = (i1[0],i2[0],i1[1],i2[1],i1[2],i2[2])
R12 = (np.array(i2[3:6])-np.array(i1[3:6]))
SKmat = SK_matrices[o1o2]
for p in pts: #iterate over the points in the cluster
Rij = R12 + np.dot(p,avec)
Rijn = np.linalg.norm(Rij) #compute norm of the vector
#
if 0<Rijn<cutoff[-1]: #only proceed if within the cutoff distance
V = Vdict[np.where(Rijn>=cutoff)[0][-1]]
Vlist = Vlist_gen(V,o1o2)
if Vlist is None:
continue
elif len(Vlist)==0:
continue
Euler_A,Euler_B,Euler_y = rot_lib.Euler(rot_lib.rotate_v1v2(Rij,np.array([0,0,1])))
SKvals = mirror_SK([vi for vi in Vlist])
SKmat_num = SKmat(Euler_A,Euler_B,Euler_y,SKvals) #explicitly compute the relevant Hopping matrix for this vector and these shells
if abs(SKmat_num).max()>tol:
append = mat_els(Rij,SKmat_num,tol,index_orbitals[i1],index_orbitals[i2])
H_raw = H_raw + append
return H_raw #finally return the list of Hamiltonian matrix elements
def on_site(basis,V,offset):
'''
On-site matrix element calculation. Try both anl and alabel formats,
if neither is defined, default the onsite energy to 0.0 eV
*args*:
- **basis**: list of orbitals defining the tight-binding basis
- **V**: dictionary, Slater Koster terms
- **offset**: float, EF shift
*return*:
- **Ho**: list of Hamiltonian matrix elements
***
'''
Ho = []
for oi in basis:
try:
H = V['{:d}{:d}{:d}'.format(oi.atom,oi.n,oi.l)]
except KeyError:
try:
H = V['{:d}{:s}'.format(oi.atom,oi.label)]
except KeyError:
H = 0.0
Ho.append([oi.index,oi.index,0.0,0.0,0.0,float(H-offset)])
return Ho
def mat_els(Rij,SKmat,tol,i1,i2):
'''
Extract the pertinent, and non-zero elements of the Slater-Koster matrix
and transform to the conventional form of Hamiltonian list entries
(o1,o2,Rij0,Rij1,Rij2,H12(Rij))
*args*:
- **Rij**: numpy array of 3 float, relevant connecting vector
- **SKmat**: numpy array of float, matrix of hopping elements
for the coupling of two orbital shells
- **tol**: float, minimum hopping included in model
- **i1**, **i2**: int,int, proper index ordering for the relevant
instance of the orbital shells involved in hopping
*return*:
- **out**: list of Hamiltonian matrix elements, extracted from the
ordered SKmat, in form [[o1,o2,x12,y12,z12,H12],...]
***
'''
inds = np.where(abs(SKmat)>tol)
out = []
for ii in range(len(inds[0])):
i_1 = i1[inds[0][ii]]
i_2 = i2[inds[1][ii]]
if -1<i_1<=i_2:
out.append([i_1,i_2,*Rij,SKmat[inds[0][ii],inds[1][ii]]])
return out
def index_ordering(basis):
'''
    We use a universal ordering convention for defining the Slater-Koster matrices
which may (and most likely will) not match the ordering chosen by the user.
To account for this, we define a dictionary which gives the ordering, relative
to the normal order convention defined here, associated with a given a-n-l shell
at each site in the lattice basis.
*args*:
- **basis**: list of orbital objects
*return*:
- **indexing**: dictionary of key-value pairs (a,n,l,x,y,z):numpy.array([...])
***
'''
normal_order = {0:{'':0},1:{'x':0,'y':1,'z':2},2:{'xz':0,'yz':1,'xy':2,'ZR':3,'XY':4},3:{'z3':0,'xz2':1,'yz2':2,'xzy':3,'zXY':4,'xXY':5,'yXY':6}}
indexing = {}
for b in basis:
anl = (b.atom,b.n,b.l,*np.around(b.pos,4))
if anl not in indexing.keys():
indexing[anl] = -1*np.ones(2*b.l+1)
indexing[anl][normal_order[b.l][b.label[2:]]] = b.index
return indexing
def Vlist_gen(V,pair):
'''
Select the relevant hopping matrix elements to be used in defining the value
of the Slater-Koster matrix elements for a given pair of orbitals. Handles situation where
insufficient parameters have been passed to system.
*args*:
- **V**: dictionary of Slater-Koster hopping terms
- **pair**: tuple of int defining the orbitals to be paired, (a1,a2,n1,n2,l1,l2)
*return*:
- **Vvals**: numpy array of Vllx related to a given pairing, e.g. for s-p np.array([Vsps,Vspp])
***
'''
order = {'S':0,'P':1,'D':2,'F':3,0:'S',1:'P',2:'D',3:'F'}
vstring = '{:d}{:d}{:d}{:d}{:d}{:d}'.format(*pair[:6])
l = max(pair[4],pair[5])
if len(V.keys())<(l+1):
print('WARNING, insufficient number of Slater-Koster parameters passed: filling missing values with zeros.')
for l_index in range(l+1):
hopping_type = vstring+order[l_index]
if hopping_type not in V.keys():
V[hopping_type] = 0
try:
Vkeys = np.array(sorted([[l-order[vi[-1]],vi] for vi in V if vi[:-1]==vstring]))[:,1]
Vvals = np.array([V[vk] for vk in Vkeys])
except IndexError:
vstring = '{:d}{:d}{:d}{:d}{:d}{:d}'.format(pair[1],pair[0],pair[3],pair[2],pair[5],pair[4])
try:
Vkeys = np.array(sorted([[l-order[vi[-1]],vi] for vi in V if vi[:-1]==vstring]))[:,1]
pre = (-1)**(pair[4]+pair[5]) #relative parity of the two coupled states
Vvals = pre*np.array([V[vk] for vk in Vkeys])
except IndexError:
return None
return Vvals
def mirror_SK(SK_in):
'''
Generate a list of values which is the input appended with its mirror
reflection. The mirror boundary condition suppresses the duplicate of the
last value. e.g. [0,1,2,3,4] --> [0,1,2,3,4,3,2,1,0],
['r','a','c','e','c','a','r'] --> ['r','a','c','e','c','a','r','a','c','e','c','a','r']
Intended here to take an array of Slater-Koster hopping terms and reflect about
its last entry i.e. [Vsps,Vspp] -> [Vsps,Vspp,Vsps]
*args*:
- **SK_in**: iterable, of arbitrary length and data-type
*return*:
- list of values with same data-type as input
***
'''
return list(SK_in) + (SK_in[-2::-1])
def cluster_init(Vdict,cutoff,avec):
'''
Generate a cluster of neighbouring lattice points to use
in defining the hopping paths--ensuring that it extends
sufficiently far enough to capture even the largest hopping vectors.
Also reforms the SK dictionary and cutoff lengths to be in list format.
Returns an array of lattice points which go safely to the edge of the cutoff range.
*args*:
- **Vdict**: dictionary, or list of dictionaries of Slater Koster matrix elements
- **cutoff**: float, or list of float
- **avec**: numpy array of 3x3 float
*return*:
- **Vdict**: list of length 1 if a single dictionary passed, else unmodified
- **cutoff**: numpy array, append 0 to the beginning of the cutoff list,
else leave it alone.
- **pts**: numpy array of lattice vector indices for a region of lattice points around
the origin.
***
'''
if isinstance(cutoff,(int,float)) and not isinstance(cutoff,bool):
cutoff = np.array([0.0,cutoff])
Vdict = [Vdict]
else:
if cutoff[0]>0:
cutoff.insert(0,0)
cutoff = np.array(cutoff)
else:
cutoff = np.array(cutoff)
pt_max = np.ceil(np.array([(cutoff).max()/np.linalg.norm(avec[i]) for i in range(len(avec))]).max())
pts = region(int(pt_max)+1)
return Vdict,cutoff,pts
###############################################################################
#########################Spin Orbit Coupling###################################
###############################################################################
def spin_double(H,lb):
'''
Duplicate the kinetic Hamiltonian terms to extend over the spin-duplicated
orbitals, which are by construction in same order and appended to end of the
original basis.
*args*:
- **H**: list, Hamiltonian matrix elements [[o1,o2,x,y,z,H12],...]
- **lb**: int, length of basis before spin duplication
*return*:
        - **h2**: copy of **H** with orbital indices shifted onto the
        spin-duplicated half of the basis
***
'''
lenb = int(lb/2)
h2 = []
for i in range(len(H)):
h2.append([H[i][0]+lenb,H[i][1]+lenb,H[i][2],H[i][3],H[i][4],H[i][5]])
return h2
def SO(basis):
'''
Generate L.S matrix-elements for a given basis.
This is generic to all l, except the normal_order, which is defined here up to
and including the f electrons.
Otherwise, this method is generic to any orbital angular momentum.
    The factors dictionary defined here indicates the weight of the
    different :math:`L_iS_i` terms. The keys are tuples of (L+/-/z,S+/-/z),
    encoded somewhat cryptically: for L, (0,1,2) -> (-1,0,1), and
    for S, (-1,0,1) = S1-S2 with S1,2 = +/-1 here.
L+,L-,Lz matrices are defined for each l shell in the basis,
transformed into the basis of the tight-binding model.
The nonzero terms will then just be used along with the spin and
weighted by the factor value, and slotted into a len(**basis**)xlen(**basis**) matrix **HSO**
*args*:
- **basis**: list of orbital objects
*return*:
- **HSO**: list of matrix elements in standard format [o1,o2,0,0,0,H12]
***
'''
Md = Ylm.Yproj(basis)
normal_order = {0:{'':0},1:{'x':0,'y':1,'z':2},2:{'xz':0,'yz':1,'xy':2,'ZR':3,'XY':4},3:{'z3':0,'xz2':1,'yz2':2,'xzy':3,'zXY':4,'xXY':5,'yXY':6}}
factors = {(2,-1):0.5,(0,1):0.5,(1,0):1.0}
L,al = {},[]
HSO = []
for o in basis[:int(len(basis)/2)]:
if (o.atom,o.n,o.l) not in al:
al.append((o.atom,o.n,o.l))
Mdn = Md[(o.atom,o.n,o.l,-1)]
Mup = Md[(o.atom,o.n,o.l,1)]
Mdnp = np.linalg.inv(Mdn)
Mupp = np.linalg.inv(Mup)
L[(o.atom,o.n,o.l)] = [np.dot(Mupp,np.dot(Lm(o.l),Mdn)),np.dot(Mupp,np.dot(Lz(o.l),Mup)),np.dot(Mdnp,np.dot(Lp(o.l),Mup))]
for o1 in basis:
for o2 in basis:
if o1.index<=o2.index:
LS_val = 0.0
if np.linalg.norm(o1.pos-o2.pos)<0.0001 and o1.l==o2.l and o1.n==o2.n:
inds = (normal_order[o1.l][o1.label[2:]],normal_order[o2.l][o2.label[2:]])
ds = (o1.spin-o2.spin)/2.
if ds==0:
s=0.5*np.sign(o1.spin)
else:
s=1.0
for f in factors:
if f[1]==ds:
LS_val+=o1.lam*factors[f]*L[(o1.atom,o1.n,o1.l)][f[0]][inds]*s
HSO.append([o1.index,o2.index,0.,0.,0.,LS_val])
return HSO
def Lp(l):
'''
L+ operator in the :math:`l`, :math:`m_l` basis, organized with
(0,0) = |l,l>... (2l,2l) = |l,-l>
The nonzero elements are on the upper diagonal
*arg*:
- **l**: int orbital angular momentum
*return*:
- **M**: numpy array (2l+1,2l+1) of real float
***
'''
M = np.zeros((2*l+1,2*l+1))
r = np.arange(0,2*l,1)
M[r,r+1]=1.0
vals = [0]+[np.sqrt(l*(l+1)-(l-m)*(l-m+1)) for m in range(1,2*l+1)]
M = M*vals
return M
def Lm(l):
'''
L- operator in the l,m_l basis, organized with
(0,0) = |l,l>... (2l,2l) = |l,-l>
The nonzero elements are on the upper diagonal
*arg*:
- **l**: int orbital angular momentum
*return*:
- **M**: numpy array (2l+1,2l+1) of real float
***
'''
M = np.zeros((2*l+1,2*l+1))
r = np.arange(1,2*l+1,1)
M[r,r-1]=1.0
vals = [np.sqrt(l*(l+1)-(l-m)*(l-m-1)) for m in range(0,2*l)]+[0]
M = M*vals
return M
def Lz(l):
'''
Lz operator in the l,:math:`m_l` basis
*arg*:
- **l**: int orbital angular momentum
*return*:
- numpy array (2*l+1,2*l+1)
***
'''
return np.identity(2*l+1)*np.array([l-m for m in range(2*l+1)])
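# Quick self-consistency check (an editorial addition, not part of the original
# module): the ladder operators above must satisfy [L+,L-] = 2 Lz for any l.
def _check_ladder_algebra(l=2):
    commutator = np.dot(Lp(l), Lm(l)) - np.dot(Lm(l), Lp(l))
    assert np.allclose(commutator, 2*Lz(l)), '[L+,L-] should equal 2*Lz'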
def AFM_order(basis,dS,p_up,p_dn):
'''
Add antiferromagnetism to the tight-binding model, by adding a different on-site energy to
orbitals of different spin character, on the designated sites.
*args*:
- **basis**: list, orbital objects
- **dS**: float, size of spin-splitting (eV)
- **p_up**, **p_dn**: numpy array of float indicating the orbital positions
for the AFM order
*return*:
- **h_AF**: list of matrix elements, as conventionally arranged [[o1,o2,0,0,0,H12],...]
***
'''
h_AF = []
for bi in basis:
if np.linalg.norm(bi.pos-p_up)==0:
if bi.spin<0:
h_AF.append([bi.index,bi.index,0,0,0,dS])
else:
h_AF.append([bi.index,bi.index,0,0,0,-dS])
elif np.linalg.norm(bi.pos-p_dn)==0:
if bi.spin<0:
h_AF.append([bi.index,bi.index,0,0,0,-dS])
else:
h_AF.append([bi.index,bi.index,0,0,0,dS])
return h_AF
def FM_order(basis,dS):
'''
Add ferromagnetism to the system. Take dS to assume that the splitting puts
    spin-up lower in energy by dS, and vice versa for spin-down. This directly
modifies the *TB_model*'s **mat_els** attribute
*args*:
- **basis**: list, of orbital objects in basis
- **dS**: float, energy of the spin splitting (eV)
*return*:
- list of matrix elements [[o1,o2,0,0,0,H12],...]
***
'''
return [[bi.index,bi.index,0,0,0,-np.sign(bi.spin)*dS] for bi in basis]
#def Efield(basis,field,orbital_type='Slater'):
'''
Define a set of matrix elements which introduce an electric field, treated at the level of a dipole operator.
TODO
'''
# return None
def region(num):
'''
Generate a symmetric grid of points in number of lattice vectors.
*args*:
- **num**: int, grid will have size 2*num+1 in each direction
*return*:
- numpy array of size ((2*num+1)**3,3) with centre value of first entry
of (-num,-num,-num),...,(0,0,0),...,(num,num,num)
***
'''
num_symm = 2*num+1
return np.array([[int(i/num_symm**2)-num,int(i/num_symm)%num_symm-num,i%num_symm-num] for i in range((num_symm)**3)])
```
#### File: chinook/chinook/lattice_lib.py
```python
import numpy as np
import matplotlib.pyplot as plt
class lattice:
'''
Primarily a utility class, for use in visualizing lattice and
basis. Also includes some tools for converting between different
unit conventions.
'''
def __init__(self,avec,basis):
self.avec = avec
self.ivec = np.linalg.inv(avec)
self.pos,self.fpos = self.parse_basis(basis,3)
self.edges = self.cell_edges()
def parse_basis(self,basis,nmax):
'''
Take orbital basis and establish all equivalent locations
of each atom in the basis within a region near the origin
*args*:
- **basis**: list of orbital objects
- **nmax**: int, number of neighbouring cells to consider
*return*:
            - **atoms**: dictionary, keyed by atom index, containing all positions of each atom
- **frac_posns**: same as **atoms**, but in lattice vector units
***
'''
atoms = {}
frac_posns = {}
for bi in basis:
if bi.atom not in atoms:
atoms[bi.atom] = [tuple(bi.pos)]
else:
atoms[bi.atom].append(tuple(bi.pos))
for ai in atoms:
atoms[ai] = np.array(list(dict.fromkeys(atoms[ai])))
fpos =self.frac_pos(atoms[ai])
frac_posns[ai] = neighbours(fpos,nmax)
atoms[ai] = np.einsum('ij,ki->kj',self.avec,frac_posns[ai])
return atoms,frac_posns
def frac_pos(self,posns):
'''
Inverse multiplication of lattice vectors with position vector,
to get position in units of lattice vectors, rather than direct units
of Angstrom
*args*:
- **posns**: numpy array of Nx3 float
*return*:
- numpy array of Nx3 float
***
'''
return np.einsum('ji,kj->ki',self.ivec,posns)
def draw_lattice(self,ax=None):
'''
Plotting utility function, display unit cell parallelepiped, and
atoms inside
*kwargs*:
- **ax**: matplotlib Axes, for plotting onto existing axes
*return*:
- **ax**: matplotlib Axes, for further editing of plots
***
'''
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111,projection='3d')
for ii in range(len(self.edges)):
ax.plot(self.edges[ii,:,0],self.edges[ii,:,1],self.edges[ii,:,2],c='k')
for ai in self.fpos:
ax.scatter(self.pos[ai][:,0],self.pos[ai][:,1],self.pos[ai][:,2])
return ax
def cell_edges(self):
'''
Evaluate the edges forming an enclosing volume for the unit cell.
*args*:
- **avec**: numpy array of 3x3 float, lattice vectors
*return*:
- **edges**: numpy array of 24x2x3 float, indicating the endpoints of all
bounding edges of the cell
***
'''
modvec = np.array([[np.mod(int(j/4),2),np.mod(int(j/2),2),np.mod(j,2)] for j in range(8)])
edges = []
for p1 in range(len(modvec)):
for p2 in range(p1,len(modvec)):
if np.linalg.norm(modvec[p1]-modvec[p2])==1:
edges.append([np.dot(self.avec.T,modvec[p1]),np.dot(self.avec.T,modvec[p2])])
edges = np.array(edges)
return edges
def lattice_pars_to_vecs(norm_a,norm_b,norm_c,ang_a,ang_B,ang_y):
'''
A fairly standard way to define lattice vectors is in terms of the vector lengths,
and their relative angles--defining in this way a parallelepiped unit cell. Use
this notation to translate into a numpy array of 3x3 array of float
*args*:
- **norm_a**, **norm_b**, **norm_c**: float, length vectors, Angstrom
- **ang_a**, **ang_B**, **ang_y**: float, angles between (b,c), (a,c), (a,b) in degrees
*return*:
- numpy array of 3x3 float: unit cell vectors
***
'''
rad = np.pi/180
ang_a *=rad
ang_B *=rad
ang_y *=rad
vec1 = np.array([norm_a,0,0])
vec2 = norm_b*np.array([np.cos(ang_y),np.sin(ang_y),0])
vec3 = norm_c*np.array([np.cos(ang_B),(np.cos(ang_a)-np.cos(ang_B)*np.cos(ang_y))/np.sin(ang_y),0])
vec3[2] = np.sqrt(norm_c**2 - vec3[0]**2 - vec3[1]**2)
return np.around(np.array([vec1,vec2,vec3]),5)
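# Quick illustration (an editorial addition, not part of the original module):
# a cubic cell with all angles at 90 degrees must give axis-aligned vectors.
def _check_cubic_cell(a=4.0):
    vecs = lattice_pars_to_vecs(a, a, a, 90., 90., 90.)
    assert np.allclose(vecs, a*np.identity(3)), 'cubic cell should be diagonal'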
def neighbours(pos,num):
'''
Build series of lattice points using the pos arrays, out to some fixed number of points away
from origin
*args*:
- **pos**: numpy array of 3x3 float
- **num**: int, number of sites to consider
*return*:
        - **inside**: numpy array of Nx3 float, all points in neighbouring region of lattice
    ***
    '''
num_symm= 2*num+1
points = np.array([[int(i/num_symm**2)-num,int(i/num_symm)%num_symm-num,i%num_symm-num] for i in range((num_symm)**3)])
inside = []
for pi in pos:
all_points = points + pi
all_points = all_points[np.where((all_points[:,0]>=0) & (all_points[:,0]<1) & (all_points[:,1]>=0) & (all_points[:,1]<1) & (all_points[:,2]>=0) & (all_points[:,2]<1))]
        inside.extend(all_points)
return np.array(inside)
```
#### File: chinook/chinook/slab.py
```python
import numpy as np
from operator import itemgetter
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import chinook.rotation_lib as rotlib
import chinook.orbital as olib
import chinook.TB_lib as TB_lib
import chinook.surface_vector as surface_vector
def GCD(a,b):
'''
    Basic greatest common divisor function. First find all divisors of each a and b.
Then find the maximal common element of their divisors.
*args*:
- **a**, **b**: int
*return*:
- int, GCD of **a**, **b**
***
'''
dA,dB = divisors(a),divisors(b)
return max(dA[i] for i in range(len(dA)) if dA[i] in dB)
def LCM(a,b):
'''
    Basic lowest common multiple for two values a,b. Based on the idea that the LCM is just the
    product of the two inputs, divided by their greatest common divisor.
*args*:
- **a**, **b**: int
*return*:
- int, LCM of **a** and **b**
***
'''
return int(a*b/GCD(a,b))
def divisors(a):
'''
Iterate through all integer divisors of integer input
*args*:
- **a**: int
*return*:
list of int, divisors of **a**
***
'''
return [int(a/i) for i in range(1,a+1) if (a/i)%1==0]
def LCM_3(a,b,c):
'''
For generating spanning vectors, require lowest common multiple of 3
integers, itself just the LCM of one of the numbers, and the LCM of the other two.
*args*:
- **a**, **b**, **c**: int
*return*:
int, LCM of the three numbers
***
'''
return LCM(a,LCM(b,c))
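# Quick numeric check of the helper arithmetic (an editorial addition, not part
# of the original module): GCD(12,18)=6, LCM(4,6)=12, LCM_3(2,3,4)=12.
def _check_number_helpers():
    assert GCD(12, 18) == 6
    assert LCM(4, 6) == 12
    assert LCM_3(2, 3, 4) == 12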
def iszero(a):
'''
Find where an iterable of numeric is zero, returns empty list if none found
*args*:
- **a**: numpy array of numeric
*return*:
- list of int, indices of iterable where value is zero
***
'''
return np.where(a==0)[0]
def nonzero(a):
'''
Find where an iterable of numeric is non-zero, returns empty list if none found
*args*:
- **a**: numpy array of numeric
*return*:
- list of int, indices of iterable where value is non-zero
***
'''
return np.where(a!=0)[0]
def abs_to_frac(avec,vec):
'''
Quick function for taking a row-ordered matrix of lattice vectors:
| a_11 a_12 a_13 |
| a_21 a_22 a_23 |
| a_31 a_32 a_33 |
and using it to transform a vector, written in absolute units, to fractional units.
Note this function can be used to broadcast over N vectors you would like to transform
*args*:
- **avec**: numpy array of 3x3 float lattice vectors, ordered by rows
- **vec**: numpy array of Nx3 float, vectors to be transformed to
fractional coordinates
*return*:
- Nx3 array of float, vectors translated into basis of lattice vectors
***
'''
return np.dot(vec,np.linalg.inv(avec))
def frac_to_abs(avec,vec):
'''
Same as abs_to_frac, but in opposite direction,from fractional to absolute coordinates
*args*:
- **avec**: numpy array of 3x3 float, lattice vectors, row-ordered
- **vec**: numpy array of Nx3 float, input vectors
*return*:
- N x 3 array of float, vec in units of absolute coordinates (Angstrom)
***
'''
return np.dot(vec,avec)
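# Round-trip sketch (an editorial addition, not part of the original module):
# converting to fractional coordinates and back must reproduce the input.
def _check_frac_roundtrip():
    avec = np.array([[4., 0., 0.], [0., 4., 0.], [0., 0., 7.]])
    vec = np.array([[1.0, 2.0, 3.0]])
    assert np.allclose(frac_to_abs(avec, abs_to_frac(avec, vec)), vec)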
def p_vecs(miller,avec):
'''
Produce the vectors p, as defined by Ceder, to be used in defining spanning
vectors for plane normal to the Miller axis
*args*:
- **miller**: numpy array of len 3 float
- **avec**: numpy array of size 3x3 of float
*return*:
- **pvecs**: numpy array size 3x3 of float
***
'''
n_zero = nonzero(miller)
zero = iszero(miller)
pvecs = np.zeros((3,3))
abs_miller = abs(miller)
sgn_miller = np.sign(miller)
if len(n_zero)==3:
M = LCM_3(*abs_miller)
M*=sgn_miller
pvecs = np.array([avec[0]*M/miller[0],
avec[1]*M/miller[1],
avec[2]*M/miller[2]])
elif len(n_zero)==2:
        M = LCM(*abs_miller[n_zero])
        M*=sgn_miller
pvecs[n_zero[0]] = M/miller[n_zero[0]]*avec[n_zero[0]]
pvecs[n_zero[1]] = M/miller[n_zero[1]]*avec[n_zero[1]]
pvecs[zero[0]] = pvecs[n_zero[0]] + avec[zero[0]]
elif len(n_zero)==1:
pvecs[n_zero[0]] = np.zeros(3)
pvecs[zero[0]] = avec[zero[0]]
pvecs[zero[1]] = avec[zero[1]]
return pvecs
def v_vecs(miller,avec):
'''
Wrapper for functions used to determine the vectors used to define the new,
surface unit cell.
*args*:
- **miller**: numpy array of 3 int, Miller indices for surface normal
- **avec**: numpy array of 3x3 float, Lattice vectors
*return*:
- **vvecs**: new surface unit cell vectors numpy array of 3x3 float
'''
pvecs = p_vecs(miller,avec)
vvecs = np.zeros((3,3))
vvecs[0] = pvecs[1]-pvecs[0]
vvecs[1] = pvecs[2]-pvecs[0]
vvecs[2] = surface_vector.find_v3(vvecs[0],vvecs[1],avec,40)
return vvecs
def basal_plane(vvecs):
'''
Everything is most convenient if we redefine the basal plane of the surface
normal to be oriented within a Cartesian plane. To do so, we take the
v-vectors. We get the norm of v1,v2 and then find the cross product with
the z-axis, as well as the angle between these two vectors. We can then
rotate the surface normal onto the z-axis.
In this way we conveniently re-orient the v1,v2 axes into the Cartesian x,y plane.
*args*:
- **vvecs**: numpy array 3x3 float
*return*:
- **vvec_prime**: numpy array 3x3 of float, rotated v vectors
- **Rmat**: numpy array of 3x3 float, rotation matrix to send original
coordinate frame into the rotated coordinates.
***
'''
norm = np.cross(vvecs[0],vvecs[1])
norm = norm/np.linalg.norm(norm)
Rmat = rotlib.rotate_v1v2(norm,np.array([0,0,1])).T
vvec_prime = np.dot(vvecs,Rmat)
# Now perform one more rotation, taking vp[0] onto the [100] Cartesian axis
phi = -np.arccos(vvec_prime[0,0]/np.linalg.norm(vvec_prime[0]))
Rmat_prime = rotlib.Rodrigues_Rmat(np.array([0,0,1]),phi).T
Rmat = Rmat@Rmat_prime #matrix multiplication of the two operators
vvec_prime = np.dot(vvecs,Rmat)
vvec_prime = np.around(vvec_prime,4)
Rmat = np.around(Rmat,15)
return vvec_prime,Rmat
def par(avec):
'''
Definition of the parallelepiped, as well as a containing region within the
Cartesian projection of this form which can then be used to guarantee correct
    definition of the new cell basis. The parallelepiped is generated, and then
its extremal coordinates established, from which a containing parallelepiped is
then defined.
*args*:
- **avec**: numpy array of 3x3 float
*return*:
- **vert**: numpy array 8x3 float vertices of parallelepiped
- **box_pts**: numpy array 8 x 3 float vertices of containing box
***
'''
pts = np.array([[int(i/4),int(np.mod(i/2,2)),int(np.mod(i,2))] for i in range(8)])
vert = np.dot(pts,avec)
alpha,omega = np.array([vert[:,0].min(),vert[:,1].min(),vert[:,2].min()]),np.array([vert[:,0].max(),vert[:,1].max(),vert[:,2].max()])
box = np.identity(3)*(omega-alpha)
box_pts = np.dot(pts,box)
box_pts = np.array([c+alpha for c in box_pts])
return vert,box_pts
def populate_box(box,basis,avec,R):
'''
Populate the bounding box with points from the original lattice basis. These
represent candidate orbitals to populate the surface-projected unit cell.
*args*:
- **box**: numpy array of 8x3 float, vertices of corner of a box
- **basis**: list of orbital objects
- **avec**: numpy array of 3x3 float, lattice vectors
- **R**: numpy array of 3x3 float, rotation matrix
*return*:
- **basis_full**: list of Nx4 float, representing instances of orbitals copies,
retaining only their position and their orbital basis index. These orbitals fill
a container box larger than the region of interest.
***
'''
box_av = np.dot(box,np.linalg.inv(avec))
boxlims = np.array([[np.sign(box_av[:,i].min())*int(np.round(abs(box_av[:,i].min()))),np.sign(box_av[:,i].max())*int(np.round(abs(box_av[:,i].max())))] for i in range(3)])
boxmesh = np.array([[boxlims[0,0]+i,boxlims[1,0]+j,boxlims[2,0]+k]
for i in range(int(boxlims[0,1]-boxlims[0,0]+1))
for j in range(int(boxlims[1,1]-boxlims[1,0]+1))
for k in range(int(boxlims[2,1] - boxlims[2,0]+1))])
real_space = np.dot(boxmesh,avec)
basis_fill = []
for ri in real_space:
for b in basis:
tmp = np.dot(b.pos,R) + ri
basis_fill.append([*tmp,b.index])
return np.array(basis_fill)
def populate_par(points,avec):
'''
Fill the box with basis points, keeping only those which reside in the new
unit cell.
*args*:
- **points**: numpy array of Nx4 float ([:3] give position, [3] gives index)
- **avec**: numpy array of 3x3 float
*return*:
- **new_points**: Nx3 numpy array of float, coordinates of new orbitals
- **indices**: Nx1 numpy array of float, indices in original basis
***
'''
in_points = frac_inside(points,avec)
new_points = in_points[:,:3]
indices = in_points[:,-1]
return new_points,indices
def frac_inside(points,avec):
'''
Use fractional coordinates to determine whether a point is inside the new unit cell, or not.
This is a very simple way of establishing this point, and circumvents many of the awkward
rounding issues of the parallelepiped method I have used previously. Ultimately however,
imprecision of the matrix multiplication and inversion result in some rounding error which
must be corrected for. To do this, the fractional coordinates are rounded to the 4th digit.
    This leads to an uncertainty smaller by more than a factor of 10^3 compared with
    rounding performed directly on the absolute coordinates.
*args*:
- **points**: numpy array of float (Nx4) indicating positions and basis indices of the points to consider
- **avec**: numpy array of 3x3 float, new lattice vectors
*return*:
- numpy array of Mx4 float, indicating positions and basis indices of the valid basis elements inside the new
unit cell.
***
'''
fpoints = np.around(abs_to_frac(avec,points[:,:3]),4)
bool_coords = np.array([True if (fp.min()>=0 and fp.max()<1) else False for fp in fpoints])
return points[bool_coords]
def gen_surface(avec,miller,basis):
'''
Construct the surface unit cell, to then be propagated along the 001 direction to form a slab
*args*:
- **avec**: numpy array of 3x3 float, lattice vectors for original unit cell
- **miller**: numpy array of 3 int, Miller indices indicating the surface orientation
- **basis**: list of orbital objects, orbital basis for the original lattice
*return*:
- **new_basis**: list of orbitals, surface unit cell orbital basis
- **vn_b**: numpy array of 3x3 float, the surface unit cell primitive lattice vectors
- **Rmat**: numpy array of 3x3 float, rotation matrix, to be used in post-multiplication order
***
'''
vn_b,Rmat = basal_plane(v_vecs(miller,avec))
pipe,box = par(vn_b)
avec_R = np.dot(avec,Rmat)
b_points = populate_box(box,basis,avec_R,Rmat)
in_pped,inds = populate_par(b_points,vn_b)
new_basis = np.empty(len(in_pped),dtype=olib.orbital)
ordering = sorted_basis(in_pped,inds)
for ii in range(len(in_pped)):
tmp = basis[int(ordering[ii][3])].copy()
tmp.slab_index = int(ordering[ii][3])
tmp.index = ii
tmp.pos = ordering[ii][:3]
tmp.proj,tmp.Dmat = olib.rot_projection(tmp.l,tmp.proj,Rmat.T) #CHANGE TO TRANSPOSE
new_basis[ii] = tmp
return new_basis,vn_b,Rmat
def sorted_basis(pts,inds):
'''
Re-order the elements of the new basis, with preference to z-position followed
by the original indexing
*args*:
- **pts**: numpy array of Nx3 float, orbital basis positions
- **inds**: numpy array of N int, indices of orbitals, from original basis
*return*:
- **labels_sorted**: numpy array of Nx4 float, [x,y,z,index], in order of increasing z, and index
***
'''
labels = np.array([[*pts[ii],inds[ii]] for ii in range(len(inds))])
labels_sorted = np.array(sorted(labels,key=itemgetter(2,3)))
return labels_sorted
def gen_slab(basis,vn,mint,minb,term,fine=(0,0)):
'''
Using the new basis defined for the surface unit cell, generate a slab
of at least mint (minimum thickness), minb (minimum buffer) and terminated
by orbital term. In principle the termination should be the same on both top and
bottom to avoid inversion symmetry breaking between the two lattice terminations.
In certain cases, mint,minb may need to be tuned somewhat to get exactly the surface
terminations you want.
*args*:
- **basis**: list of instances of orbital objects
- **vn**: numpy array of 3x3 float, surface unit cell lattice vectors
- **mint**: float, minimum thickness of the slab, in Angstrom
- **minb**: float, minimum thickness of the vacuum buffer, in Angstrom
- **term**: tuple of 2 int, termination of the slab tuple (term[0] = top termination, term[1] = bottom termination)
- **fine**: tuple of 2 float, fine adjustment of the termination to precisely specify terminating atoms
*return*:
- **avec**: numpy array of float 3x3, updated lattice vector for the SLAB unit cell
- **new_basis**: array of new orbital basis objects, with slab-index corresponding to the original basis indexing,
and primary index corresponding to the order within the new slab basis
***
'''
pts = []
Nmin = int(np.ceil((mint+minb)/vn[2,2])+2)
for i in range(Nmin):
for bi in basis:
pts.append([*(i*vn[-1]+bi.pos),bi.atom,bi.slab_index,bi.index])
pts = np.array(pts)
z_order = pts[:,2].argsort()
pts = pts[z_order]
pts = np.array(sorted(pts,key=itemgetter(2,5)))
#termination organized by ATOM
term_1set = pts[pts[:,3]==term[1]]
term_0set = pts[pts[:,3]==term[0]]
surf = pts[:,2].max()-minb
base = term_1set[term_1set[:,2]>=(term_1set[:,2].min()+fine[1]),2].min()
top = term_0set[np.where(abs(term_0set[(term_0set[:,2]-surf)<fine[0],2]-surf)==abs(term_0set[(term_0set[:,2]-surf)<fine[0],2]-surf).min())[0][0],2]
cull = np.array([pts[p] for p in range(len(pts)) if base<=pts[p,2]<=top])
cull[:,2]-=top
avec = np.copy(vn)
avec[2] = Nmin*vn[2]
new_basis = np.empty(len(cull),dtype=olib.orbital)
for ii in range(len(cull)):
iter_orbital = basis[int(cull[ii,-1])].copy()
iter_orbital.slab_index = int(cull[ii,-1])
iter_orbital.index = ii
iter_orbital.pos = cull[ii,:3]-np.array([cull[-1,0],cull[-1,1],0])
iter_orbital.depth = iter_orbital.pos[2]
new_basis[ii] = iter_orbital
return avec,new_basis
def region(num):
'''
Generate a symmetric grid of points in number of lattice vectors.
*args*:
- **num**: int, grid will have size 2 num+1 in each direction
*return*:
- numpy array of size ((2 num+1)^3,3) with centre value of first entry of
(-num,-num,-num),...,(0,0,0),...,(num,num,num)
***
'''
num_symm = 2*num+1
return np.array([[int(i/num_symm**2)-num,int(i/num_symm)%num_symm-num,i%num_symm-num] for i in range((num_symm)**3)])
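# Quick sanity sketch: region(1) returns the 27 points of a 3x3x3 grid,
#   region(1)[0]  -> array([-1, -1, -1])
#   region(1)[13] -> array([0, 0, 0])
#   region(1)[-1] -> array([1, 1, 1])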
def unpack(Ham_obj):
'''
Reduce a Hamiltonian object down to a list of matrix elements. Include the Hermitian conjugate terms
*args*:
- **Ham_obj**: Hamiltonian object, c.f. *chinook.TB_lib.H_me*
*return*:
- **Hlist**: list of Hamiltonian matrix elements
***
'''
Hlist =[]
for hij in Ham_obj:
for el in hij.H:
Hlist.append([hij.i,hij.j,*el])
return Hlist
#
#
def H_surf(surf_basis,avec,H_bulk,Rmat,lenbasis):
'''
Rewrite the bulk-Hamiltonian in terms of the surface unit cell, with its
(most likely expanded) basis. The idea here is to organize all 'duplicate'
orbitals, in terms of their various connecting vectors. Using modular
arithmetic, we then create an organized dictionary which categorizes the
hopping paths within the new unit cell according to the new basis index
designation. For each element in the Hamiltonian then, we can do the same
modular definition of the hopping vector, easily determining which orbital
in our new basis this hopping path indeed corresponds to. We then make a
new list, organized according to the new basis indexing.
*args*:
- **surf_basis**: list of orbitals in the surface unit cell
- **avec**: numpy array 3x3 of float, surface unit cell vectors
- **H_bulk**: *H_me* object (defined in *chinook.TB_lib.py*), as
the bulk-Hamiltonian
- **Rmat**: 3x3 numpy array of float, rotation matrix
(pre-multiply vectors) for rotating the coordinate system from bulk
to surface unit cell axes
- **lenbasis**: int, length of bulk basis
*return*:
- Hamiltonian object, written in the basis of the surface unit cell,
and its coordinate frame, rather than those of the bulk system
***
'''
av_i = np.linalg.inv(avec) #inverse lattice vectors
cv_dict = mod_dict(surf_basis,av_i) #generate dictionary of connecting vectors between each of the relevant orbitals, according to their definition in the bulk lattice.
H_old = unpack(H_bulk) #transform Hamiltonian object to list of hopping terms
Rcv = np.dot(np.array([h[2:5] for h in H_old]),Rmat) #rotate all hopping paths into the coordinate frame of the surface unit cell
H_new = [] #initialize the new Hamiltonian list
for ii in range(len(H_old)): #iterate over all original Hamiltonian hopping paths
hi = H_old[ii] #temporary Hamiltonian matrix element to consider
R_latt = np.mod(np.around(np.dot(Rcv[ii],av_i),3),1) #what is the hopping path, in new coordinate frame, in terms of modular vector (mod lattice vector)
        R_compare = np.linalg.norm(R_latt-(cv_dict['{:d}-{:d}'.format(hi[0],hi[1])][:,2:5]),axis=1) #distance of each candidate connecting vector from the hopping path
try:
match = np.where(R_compare<5e-4)[0]
for mi in match:#find the match
tmp_H = [*cv_dict['{:d}-{:d}'.format(hi[0],hi[1])][int(mi)][:2],*np.around(Rcv[ii],4),hi[-1]]
H_new.append(tmp_H)
if H_new[-1][0]>H_new[-1][1]:
H_new[-1] = H_conj(tmp_H)
except IndexError:
print('ERROR: no valid hopping path found relating original Hamiltonian to surface unit cell.')
continue
print('Number of Bulk Hamiltonian Hopping Terms Found: {:d}, Number of Surface Basis Hopping Terms Filled: {:d}'.format(len(H_old),len(H_new)))
if (len(H_new)/len(H_old))!=(len(surf_basis)/(lenbasis)):
print('Going from {:d} to {:d} basis states'.format(lenbasis,len(surf_basis)))
print('Invalid HAMILTONIAN! Missing hopping paths.')
return []
Hobj = TB_lib.gen_H_obj(H_new)
return Hobj
def Hobj_to_dict(Hobj,basis):
'''
Associate a list of matrix elements with each orbital in the original basis.
The hopping paths are given not in direct units, but as the number of unit-vectors
for each hopping path. So the actual hopping path will be:
np.dot(H[2:5],svec)+TB.basis[j].pos-TB.basis[i].pos
This facilitates determining easily which basis element we are dealing with.
For the slab, the new supercell will be extended along the 001 direction.
So to redefine the orbital indices for a given element, we just take
[i, len(basis)*(R_2)+j, (np.dot((R_0,R_1,R_2),svec)+pos[j]-pos[i]),H]
If the path goes into the vacuum buffer, don't add it to the new list!
*args*:
- **Hobj**: *H_me* object (defined in *chinook.TB_lib.py*), as
the bulk-Hamiltonian
- **basis**: list of *orbital* objects
*return*:
- **Hdict**: dictionary of hopping paths associated with a given orbital
index
***
'''
Hdict = {ii:[] for ii in range(len(basis))}
Hlist = unpack(Hobj)
for hi in Hlist:
Hdict[hi[0]].append([*hi])
Hdict[hi[1]].append([*H_conj(hi)])
return Hdict
def build_slab_H(Hsurf,slab_basis,surf_basis,svec):
'''
Build a slab Hamiltonian, having already defined the surface-unit cell
Hamiltonian and basis. Begin by creating a dictionary corresponding to the
Hamiltonian matrix elements associated with the relevant surface unit cell
orbital which pairs with our slab orbital, and all its possible hoppings
in the original surface unit cell. This dictionary conveniently redefines
the hopping paths in units of lattice vectors between the relevant orbitals.
In this way, we can easily relabel a matrix element by the slab_basis
elements, and then translate the connecting vector in terms of the
pertinent orbitals.
If the resulting element is from the lower triangle, take its conjugate.
Finally, only if the result is physical, i.e. corresponds to a hopping path
contained in the slab, and not e.g. extending into the vacuum,
should the matrix element be included in the new Hamiltonian. Finally,
the new list Hnew is made into a Hamiltonian object, as always, and
duplicates are removed.
*args*:
- **Hsurf**: *H_me* object (defined in *chinook.TB_lib.py*), as
the bulk-Hamiltonian from the surface unit cell
- **slab_basis**: list of orbital objects, slab unit cell basis
- **surf_basis**: list of orbital objects, surface unit cell basis
- **svec**: numpy array of 3x3 float, surface unit cell lattice vectors
*return*:
- list of Hamiltonian matrix elements in [i,j,x,y,z,Hij] format
***
'''
Hnew = []
Hdict = Hobj_to_dict(Hsurf,surf_basis) #dictionary of hoppings. keys correspond to the slab_index, values the relative hoppings elements
si = np.linalg.inv(svec)
D = slab_basis[0].slab_index
for oi in slab_basis:
Htmp = Hdict[oi.slab_index] #access relevant hopping paths for the orbital in question
for hi in Htmp: #iterate over all relevant hoppings
            ncells = int(np.round(np.dot(hi[2:5]-surf_basis[hi[1]].pos+surf_basis[hi[0]].pos,si)[2])) #how many unit cells (in the surface unit cell basis) are jumped during this hopping -- specifically, cells along the normal direction
Htmp_2 = [0]*6 #create empty hopping element, to be filled
Htmp_2[0] = int(oi.index) #matrix row should be the SLAB BASIS INDEX
Htmp_2[1] = int((D+oi.index)/len(surf_basis))*len(surf_basis) + int(len(surf_basis)*ncells+hi[1]-D)
#matrix column is calculated as follows:
            #the first orbital's slab index is often not zero; D is the place-holder for the actual start. Following this, the
            # index increments monotonically, while slab_index is defined mod-len(surf_basis). To get the new 'j' index,
#we find first the 'surface-cell' number of 'i', defined as int((D+i)/len(surf))*len(surf). Then we increment
#by the integer number of surface-unit cells covered by the hopping vector, and further by the difference between
#the original o2 index j, and the starting slab_index D.
Htmp_2[5] = hi[5]
try:
Htmp_2[2:5] = hi[2:5]
if 0<=Htmp_2[1]<len(slab_basis) and 0<=Htmp_2[0]<len(slab_basis):
if Htmp_2[1]>=Htmp_2[0]:
Hnew.append(Htmp_2)
except IndexError:
continue
Hobj = TB_lib.gen_H_obj(Hnew)
print('clean Hamiltonian')
for h in Hobj:
tmp_H = h.clean_H()
h.H = tmp_H.copy()
return unpack(Hobj)
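# Worked sketch of the column arithmetic in build_slab_H (illustrative numbers):
# with len(surf_basis)=4, starting slab_index D=2, orbital oi.index=0 and a
# hopping hi with hi[1]=3 that crosses ncells=1 surface cells along the normal,
#   int((2+0)/4)*4 = 0 and 4*1 + 3 - 2 = 5, so Htmp_2[1] = 5,
# i.e. the partner orbital sits one surface cell deeper in the slab, at index 5.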
def bulk_to_slab(slab_dict):
'''
Wrapper function for generating a slab tight-binding model, having
established a bulk model.
*args*:
- **slab_dict**: dictionary containing all essential information
regarding the slab construction:
- *'miller'*: numpy array len 3 of int, miller indices
- *'TB'*: Tight-binding model corresponding to the bulk model
- *'fine'*: tuple of 2 float. Fine adjustment of the slab limits,
beyond the termination, to precisely indicate the termination. In
units of Angstrom, relative to the bottom and top surfaces generated
- *'thick'*: float, minimum thickness of the slab structure
- *'vac'*: float, minimum thickness of the slab vacuum buffer
to properly generate a surface with possible surface states
- *'termination'*: tuple of 2 int, specifying the basis indices
for the top and bottom of the slab structure
*return*:
- **slab_TB**: tight-binding TB object containing the slab basis
- **slab_ham**: Hamiltonian object, slab Hamiltonian
- **Rmat**: numpy array of 3x3 float, rotation matrix
***
'''
surf_basis,nvec,Rmat = gen_surface(slab_dict['TB'].avec,slab_dict['miller'],slab_dict['TB'].basis)
surf_ham = H_surf(surf_basis,nvec,slab_dict['TB'].mat_els,Rmat,len(slab_dict['TB'].basis))
slab_vec,slab_basis = gen_slab(surf_basis,nvec,slab_dict['thick'],slab_dict['vac'],slab_dict['termination'],slab_dict['fine'])
slab_ham = build_slab_H(surf_ham,slab_basis,surf_basis,nvec)
slab_TB = slab_dict['TB'].copy()
slab_TB.avec = slab_vec
slab_TB.basis = slab_basis
slab_TB.Kobj.kpts = np.dot(slab_dict['TB'].Kobj.kpts,Rmat)
return slab_TB,slab_ham,Rmat
def H_conj(h):
'''
Conjugate hopping path
*args*:
- **h**: list, input hopping path in format [i,j,x,y,z,Hij]
*return*:
- list, reversed hopping path, swapped indices, complex conjugate of the
hopping strength
***
'''
return [h[1],h[0],-h[2],-h[3],-h[4],np.conj(h[5])]
def mod_dict(surf_basis,av_i):
'''
Define dictionary establishing connection between slab basis elements and the
bulk Hamiltonian. Since the slab_indices relate to the bulk model, we can then compile
a list of *slab* orbital pairs (along with their connecting vectors) which should
be related to a given bulk model hopping. The hopping is expressed in terms of the
number of surface lattice vectors, rather than direct units of Angstrom.
*args*:
- **surf_basis**: list of orbital objects, covering the slab model
- **av_i**: numpy array of 3x3 float, inverse of the lattice vector matrix
*return*:
- **cv_dict**: dictionary with key-value pairs of
slab_index[i]-slab_index[j]:numpy.array([[i,j,mod_vec]...])
***
'''
cv_dict = {}
for bi in range(len(surf_basis)):
for bj in range(len(surf_basis)):
mod_vec = np.mod(np.around(np.dot((surf_basis[bj].pos-surf_basis[bi].pos),av_i),3),1)
try:
cv_dict['{:d}-{:d}'.format(surf_basis[bi].slab_index,surf_basis[bj].slab_index)].append([bi,bj,*mod_vec])
except KeyError:
cv_dict['{:d}-{:d}'.format(surf_basis[bi].slab_index,surf_basis[bj].slab_index)] =[[bi,bj,*mod_vec]]
for cvi in cv_dict:
cv_dict[cvi] = np.array(cv_dict[cvi])
return cv_dict
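# Numeric sketch of the modular matching used in H_surf (illustrative values):
# a connecting vector of [0.3, -0.25, 0.0] in fractional units becomes
#   np.mod(np.around([0.3, -0.25, 0.0], 3), 1) -> [0.3, 0.75, 0.0]
# so hopping paths differing only by whole lattice vectors share one dictionary entry.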
```
#### File: chinook/chinook/SlaterKoster.py
```python
import numpy as np
import chinook.wigner as Wlib
import chinook.Ylm as Ylm
def Vmat(l1,l2,V):
'''
For Slater-Koster matrix element generation, a potential matrix is
sandwiched in between the two bond-rotating Dmatrices. It should be
of the shape 2*l1+1 x 2*l2+1, and have the V_l,l',D terms along the
'diagonal'-- a concept that is only well defined for a square matrix.
For mismatched angular momentum channels, this turns into a diagonal
square matrix of dimension min(2*l1+1,2*l2+1) centred along the larger
axis. For channels where the orbital angular momentum change involves a
change in parity, the potential should change sign, as per Slater and Koster's
original definition from 1954. This is taken care of automatically in
the Wigner formalism used here, so no exceptions are needed.
*args*:
- **l1**, **l2**: int orbital angular momentum of initial and final states
- **V**: numpy array of float -- length should be min(**l1** ,**l2**)*2+1
*return*:
- **Vm**: numpy array of float, shape 2 **l1** +1 x 2 **l2** +1
***
'''
if l1==0 or l2==0:
Vm = np.zeros(max(2*l1+1,2*l2+1))
Vm[int(len(Vm)/2)] = V[0]
else:
Vm = np.zeros((2*l1+1,2*l2+1))
lmin = min(l1,l2)
lmax = max(l1,l2)
Vvals = np.identity(2*lmin+1)*np.array(V)
if l2>l1:
Vm[:,lmax-lmin:lmax-lmin+2*lmin+1] = Vvals
else:
Vm[lmax-lmin:lmax-lmin+2*lmin+1,:] = Vvals
return np.atleast_2d(Vm)
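# Shape sketch: for mismatched channels l1=1, l2=2 the potential vector has
# length 2*min(l1,l2)+1 = 3 and lands on the centred diagonal of a 3x5 block:
#   Vmat(1, 2, np.array([1., 2., 3.]))
#   -> [[0., 1., 0., 0., 0.],
#       [0., 0., 2., 0., 0.],
#       [0., 0., 0., 3., 0.]]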
def SK_cub(Ymats,l1,l2):
'''
In order to generate a set of independent Lambda functions for rapid
generation of Hamiltonian matrix elements, one must nest the
definition of the lambda functions within another function. In this way,
we avoid cross-contamination of unrelated functions.
The variables which are fixed for a given lambda function are the
cubic -to- spherical harmonics (Ymat) transformations, and the
orbital angular momentum of the relevant basis channels. The output
lambda functions will be functions of the Euler-angles pertaining
to the hopping path, as well as the potential matrix V, which will be
passed as a numpy array (min(l1,l2)*2+1) long of float.
We follow the method described for rotated d-orbitals in the thesis of
JM Carter from Toronto (HY Kee), where the Slater-Koster hopping
matrix can be defined as the following operation:
1. Transform local orbital basis into spherical harmonics
2. Rotate the hopping path onto the z-axis
3. Product with the diagonal SK-matrix
4. Rotate the path backwards
5. Rotate back into basis of local orbitals
6. Output matrix of hopping elements between all orbitals in the shell
to fill Hamiltonian
*args*:
- **Ymats**: list of numpy arrays corresponding to the relevant
transformation from cubic to spherical harmonic basis
- **l1**, **l2**: int orbital angular momentum channels relevant
to a given hopping pair
*return*:
- lambda function for the SK-matrix between these orbital shells,
for arbitrary hopping strength and direction.
***
'''
def SK_build(EA,EB,Ey,V):
o1rot = np.dot(Wlib.WignerD(l1,EA,EB,Ey),Ymats[0])
o2rot = np.dot(Wlib.WignerD(l2,EA,EB,Ey),Ymats[1])
try:
return np.dot(np.conj(o1rot).T,np.atleast_2d(np.dot(Vmat(l1,l2,V),o2rot)))
except ValueError:
return np.dot(np.conj(o1rot).T,np.atleast_2d(np.dot(Vmat(l1,l2,V).T,o2rot)))
return lambda EA,EB,Ey,V:SK_build(EA,EB,Ey,V)
def SK_full(basis):
'''
Generate a dictionary of lambda functions which take as keys the
atom,orbital for both first and second element.
Formatting is a1a2n1n2l1l2, same as for SK dictionary entries
*args*:
- **basis**: list of orbital objects composing the TB-basis
*return*:
- **SK_funcs**: a dictionary of hopping matrix functions
(lambda functions with args EA,EB,Ey,V as Euler angles and potential (V))
which can be executed for various hopping paths and potential strengths
The keys of the dictionary will be organized similar to the way the SK
parameters are passed, labelled by a1a2n1n2l1l2, which completely
defines a given orbital-orbital coupling
***
'''
SK_funcs = {}
Ymats = Ylm.Yproj(basis)
for yi in Ymats:
for yj in Ymats:
if (yi[0],yj[0],yi[1],yj[1],yi[2],yj[2]) not in SK_funcs.keys():
Y = [Ymats[yi],Ymats[yj]]
SK_funcs[(yi[0],yj[0],yi[1],yj[1],yi[2],yj[2])] = SK_cub(Y,yi[2],yj[2])
return SK_funcs
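# Usage sketch (assumes a basis list built elsewhere, e.g. with chinook.build_lib):
#   SK_funcs = SK_full(basis)
#   block = SK_funcs[(a1, a2, n1, n2, l1, l2)](EA, EB, Ey, V)
# where EA, EB, Ey are the Euler angles of a hopping path and V is the
# (2*min(l1,l2)+1)-long vector of Slater-Koster potentials for that pair.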
```
#### File: chinook/chinook/Ylm.py
```python
import numpy as np
from math import factorial
projdict={"0":np.array([[1.0,0.0,0.0,0.0]]),
"1x":np.array([[-np.sqrt(0.5),0.0,1,1],[np.sqrt(0.5),0.0,1,-1]]),"1y":np.array([[0.0,np.sqrt(0.5),1,1],[0,np.sqrt(0.5),1,-1]]),"1z":np.array([[1,0,1,0]]),
"2xy":np.array([[0.0,-np.sqrt(0.5),2,2],[0.0,np.sqrt(0.5),2,-2]]),"2yz":np.array([[0.0,np.sqrt(0.5),2,1],[0.0,np.sqrt(0.5),2,-1]]),
"2xz":np.array([[-np.sqrt(0.5),0,2,1],[np.sqrt(0.5),0,2,-1]]),"2ZR":np.array([[1.0,0.0,2,0]]),"2XY":np.array([[np.sqrt(0.5),0,2,2],[np.sqrt(0.5),0,2,-2]]),
"3z3":np.array([[1.0,0.0,3,0]]),"3xz2":np.array([[np.sqrt(0.5),0,3,-1],[-np.sqrt(0.5),0,3,1]]),
"3yz2":np.array([[0,np.sqrt(0.5),3,-1],[0,np.sqrt(0.5),3,1]]),"3xzy":np.array([[0,-np.sqrt(0.5),3,2],[0,np.sqrt(0.5),3,-2]]),
"3zXY":np.array([[np.sqrt(0.5),0,3,2],[np.sqrt(0.5),0,3,-2]]),"3xXY":np.array([[-np.sqrt(0.5),0,3,3],[np.sqrt(0.5),0,3,-3]]),
"3yXY":np.array([[0,np.sqrt(0.5),3,3],[0,np.sqrt(0.5),3,-3]])}
def Y(l,m,theta,phi):
'''
Spherical harmonics, defined here up to l = 4. This allows for photoemission from
initial states up to and including f-electrons (final states can be d- or g- like).
Can be vectorized with numpy.vectorize() to allow array-like input
*args*:
- **l**: int orbital angular momentum, up to l=4 supported
- **m**: int, azimuthal angular momentum |m|<=l
- **theta**: float, angle in spherical coordinates, radian measured from the z-axis [0,pi]
- **phi**: float, angle in spherical coordinates, radian measured from the x-axis [0,2pi]
*return*:
- complex float, value of spherical harmonic evaluated at theta,phi
'''
if l == 0:
if m==0:
return 0.5*np.sqrt(1.0/np.pi)*value_one(theta,phi)
else:
return 0.0
elif l == 1:
if abs(m) == 1:
return -np.sign(m)*0.5*np.sqrt(3.0/(2*np.pi))*np.exp(m*1.0j*phi)*np.sin(theta)
elif m == 0:
return 0.5*np.sqrt(3.0/np.pi)*np.cos(theta)
else:
return 0.0
elif l == 2:
if abs(m) == 2:
return 0.25*np.sqrt(15.0/(2*np.pi))*np.exp(m*1.0j*phi)*np.sin(theta)**2
elif abs(m) == 1:
return -np.sign(m)*0.5*np.sqrt(15.0/(2*np.pi))*np.exp(m*1.0j*phi)*np.sin(theta)*np.cos(theta)
elif m==0:
return 0.25*np.sqrt(5/np.pi)*(3*np.cos(theta)**2-1)
else:
return 0.0
elif l == 3:
if abs(m) == 3:
return -np.sign(m)*1.0/8*np.sqrt(35/np.pi)*np.exp(m*1.0j*phi)*np.sin(theta)**3
elif abs(m) == 2:
return 1.0/4*np.sqrt(105/(2*np.pi))*np.exp(m*1.0j*phi)*np.sin(theta)**2*np.cos(theta)
elif abs(m) == 1:
return -np.sign(m)*1.0/8*np.sqrt(21/np.pi)*np.exp(m*1.0j*phi)*np.sin(theta)*(5*np.cos(theta)**2-1)
elif m == 0:
return 1.0/4*np.sqrt(7/np.pi)*(5*np.cos(theta)**3-3*np.cos(theta))
else:
return 0.0
elif l == 4:
if abs(m) == 4:
return 3/16.*np.sqrt(35./2/np.pi)*np.sin(theta)**4*np.exp(m*1.0j*phi)
elif abs(m) == 3:
return -np.sign(m)*3/8.*np.sqrt(35/np.pi)*np.sin(theta)**3*np.cos(theta)*np.exp(m*1.0j*phi)
elif abs(m) == 2:
return 3./8.*np.sqrt(5/2/np.pi)*np.sin(theta)**2*(7*np.cos(theta)**2-1.0)*np.exp(m*1.0j*phi)
elif abs(m) == 1:
return -np.sign(m)*3./8*np.sqrt(5/np.pi)*np.sin(theta)*(7*np.cos(theta)**3-3*np.cos(theta))*np.exp(m*1.0j*phi)
elif m == 0:
return 3./16.*np.sqrt(1./np.pi)*(35.*np.cos(theta)**4 - 30.*np.cos(theta)**2 + 3.0)
else:
return 0.0
else:
return 0.0
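# As noted in the docstring, Y can be vectorized for array input, e.g.
#   Yv = np.vectorize(Y)
#   theta = np.linspace(0, np.pi, 50)
#   vals = Yv(2, 1, theta, 0.0) # l=2, m=1 evaluated over the whole theta array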
def value_one(theta,phi):
'''
Flexible generation of the number 1.0, in either float or array format
*args*:
- **theta**: float or numpy array of float
- **phi**: float or numpy array of float
*return*:
**out**: float or numpy array of float, evaluated to 1.0, of same shape and type
as **theta**, **phi**
***
'''
if type(theta)==np.ndarray:
out =np.ones(np.shape(theta))
if type(phi)==np.ndarray:
out = np.ones(np.shape(phi))
elif type(theta)!=np.ndarray and type(phi)!=np.ndarray:
out = 1.0
return out
def binom(a,b):
'''
Binomial coefficient for 'a choose b'
*args*:
- **a**: int, positive
- **b**: int, positive
*return*:
- float, binomial coefficient
***
'''
return factorial(a+b)/float(factorial(a-b)*factorial(b))
def laguerre(x,l,j):
'''
Laguerre polynomial of order l, degree j, evaluated over x
*args*:
- **x**: float or numpy array of float, input
- **l**: int, order of polynomial
- **j**: int, degree of polynomial
*return*:
- **laguerre_output**: float or numpy array of float, shape as input **x**
***
'''
laguerre_output = sum([((-1)**i)*(binom(l+j,j-i)*x**i/float(factorial(i))) for i in range(j+1)])
return laguerre_output
def gaunt(l,m,dl,dm):
'''
I prefer to avoid using the sympy library where possible, for speed reasons. These are the explicitly defined
Gaunt coefficients required for dipole-allowed transitions (dl = +/-1) for arbitrary m,l and dm
These have been tested against the sympy package to confirm numerical accuracy for all l,m possible
up to l=5. This function is equivalent, for the subset of dm, dl allowed, to
sympy.physics.wigner.gaunt(l,1,l+dl,m,dm,-(m+dm))
*args*:
- **l**: int orbital angular momentum quantum number
- **m**: int azimuthal angular momentum quantum number
- **dl**: int change in l (+/-1)
- **dm**: int change in azimuthal angular momentum (-1,0,1)
*return*:
- float Gaunt coefficient
***
'''
try:
if abs(m + dm)<=(l+dl):
if dl==1:
if dm == 1:
return (-1.)**(m+1)*np.sqrt(3*(l+m+2)*(l+m+1)/(8*np.pi*(2*l+3)*(2*l+1)))
elif dm == 0:
return (-1.)**(m)*np.sqrt(3*(l-m+1)*(l+m+1)/(4*np.pi*(2*l+3)*(2*l+1)))
elif dm == -1:
return (-1.)**(m-1)*np.sqrt(3*(l-m+2)*(l-m+1)/(8*np.pi*(2*l+3)*(2*l+1)))
elif dl == -1:
if dm==1:
return (-1.)**(m)*np.sqrt(3*(l-m)*(l-m-1)/(8*np.pi*(2*l+1)*(2*l-1)))
elif dm == 0:
return (-1.)**(m)*np.sqrt(3*(l+m)*(l-m)/(4*np.pi*(2*l+1)*(2*l-1)))
elif dm == -1:
return (-1.)**(m)*np.sqrt(3*(l+m)*(l+m-1)/(8*np.pi*(2*l+1)*(2*l-1)))
else:
return 0.0
except ValueError:
print('Invalid entries for dipole matrix element-related Gaunt coefficients')
print('l = {:0.4f}, m = {:0.4f}, dl = {:0.4f}, dm = {:0.4f}'.format(l,m,dl,dm))
return 0.0
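# Cross-check sketch against sympy (optional, for testing only; uses the
# equivalence quoted in the docstring):
#   from sympy.physics.wigner import gaunt as sympy_gaunt
#   l, m, dl, dm = 2, 1, 1, 1
#   float(sympy_gaunt(l, 1, l+dl, m, dm, -(m+dm))) # should equal gaunt(l, m, dl, dm)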
def Yproj(basis):
'''
Define the unitary transformation rotating the basis of different inequivalent atoms in the
basis to the basis of spherical harmonics, for the sake of defining the L.S operator in the user's basis.
29/09/2018 added reference to the spin character 'sp' to handle rotated systems effectively
*args:*
- **basis**: list of orbital objects
*return*:
- dictionary of matrices for the different atoms and l-shells--keys are tuples of (atom,l)
***
'''
normal_order = {0:{'':0},1:{'x':0,'y':1,'z':2},2:{'xz':0,'yz':1,'xy':2,'ZR':3,'XY':4},3:{'z3':0,'xz2':1,'yz2':2,'xzy':3,'zXY':4,'xXY':5,'yXY':6}}
a = basis[0].atom
n = basis[0].n
l = basis[0].l
sp = basis[0].spin
M = {}
M_tmp = np.zeros((2*l+1,2*l+1),dtype=complex)
for b in basis:
if np.linalg.norm(b.Dmat-np.identity(2*b.l+1))>0:
Dmat = b.Dmat
else:
Dmat = None
loc_rot = b.orient
label = b.label[2:]
if b.atom==a and b.n==n and b.l==l and b.spin==sp:
for p in b.proj:
M_tmp[l-int(p[-1]),normal_order[l][label]] = p[0]+1.0j*p[1]
else:
#If we are using a reduced basis, fill in orthonormalized projections for other states in the shell
#which have been ignored in our basis choice--these will still be relevant to the definition of the LS operator
M_tmp = fillin(M_tmp,l,Dmat)
M[(a,n,l,sp)] = M_tmp
##Initialize the next M matrix
a = b.atom
n = b.n
l = b.l
sp = b.spin
M_tmp = np.zeros((2*l+1,2*l+1),dtype=complex)
for p in b.proj:
M_tmp[l-int(p[-1]),normal_order[l][label]] = p[0]+1.0j*p[1]
M_tmp = fillin(M_tmp,l,loc_rot)
M[(a,n,l,sp)] = M_tmp
return M
def fillin(M,l,Dmat=None):
'''
If only using a reduced subset of an orbital shell (for example, only t2g states in d-shell),
need to fill in the rest of the projection matrix with some defaults
*args*:
- **M**: numpy array of (2l+1)x(2l+1) complex float
- **l**: int
- **Dmat**: numpy array of (2l+1)x(2l+1) complex float
*return*:
- **M**: numpy array of (2l+1)x(2l+1) complex float
***
'''
normal_order_rev = {0:{0:''},1:{0:'x',1:'y',2:'z'},2:{0:'xz',1:'yz',2:'xy',3:'ZR',4:'XY'},3:{0:'z3',1:'xz2',2:'yz2',3:'xzy',4:'zXY',5:'xXY',6:'yXY'}}
for m in range(2*l+1):
if np.linalg.norm(M[:,m])==0: #if column is empty (i.e. user-defined projection does not exist)
proj = np.zeros(2*l+1,dtype=complex)
for pi in projdict[str(l)+normal_order_rev[l][m]]:
proj[l-int(pi[-1])] = pi[0]+1.0j*pi[1] #fill the column with generic projection for this orbital (this will be a dummy)
if type(Dmat)==np.ndarray:
# print('l: {:d},'.format(l),'Dmat: ',Dmat,'proj: ',proj)
proj = np.dot(Dmat,proj)
for mp in range(2*l+1): #Orthogonalize against the user-defined projections
if mp!=m:
if np.linalg.norm(M[:,mp])!=0:
if np.dot(M[:,m],M[:,mp])>1e-10:
proj = GramSchmidt(proj,M[:,mp])
M[:,m] = proj
return M
def GramSchmidt(a,b):
'''
Simple orthogonalization of two vectors, returns orthonormalized vector
*args*:
- **a**, **b**: numpy array of same length
*returns*:
- **GS_a**: numpy array of same size, orthonormalized to the b vector
***
'''
GS_a = a - np.dot(a,b)/np.dot(b,b)*b
return GS_a/np.linalg.norm(GS_a)
if __name__=="__main__":
x = np.linspace(0,5,100)
tmp = laguerre(x,5,0)
# th = np.random.random()*np.pi
# ph = np.random.random()*2*np.pi
# for i in range(4):
# for j in range(-i,i+1):
# Yme = Y(i,j,th,ph)
# Ysc = sc.sph_harm(j,i,ph,th)
# diff = abs(Yme-Ysc)
# print i,j,diff
#
```
#### File: source/downloads/graphene_backend.py
```python
import numpy as np
import chinook.build_lib as build_lib
import chinook.ARPES_lib as arpes_lib
import chinook.operator_library as op_lib
import chinook.orbital_plotting as oplot
def construct_tightbinding(pzonly = False):
'''
Helper function for building graphene tight-binding model.
User can specify if they want just the pz-states included, or
if not, the full C2p3 basis.
*args*:
- **pzonly**: bool, True if wanting only pz-orbitals
*return*:
- **TB**: tight-binding object
- **kpath**: momentum path object, contains points and labels
for diagonalization
'''
#### DEFINE LATTICE UNIT CELL#######
alatt = 2.46
interlayer = 100.0
avec = np.array([[-alatt/2,alatt*np.sqrt(3/4.),0.0],
[alatt/2,alatt*np.sqrt(3/4.),0.0],
[0.0,0.0,interlayer]])
####### DEFINE ORBITAL BASIS ########
spin_args = {'bool':False}
basis_positions = np.array([[0.0,0.0,0.0],
[0.0,alatt/np.sqrt(3.0),0.0]])
if pzonly:
orbitals = ["21z"]
else:
orbitals = ["20","21x","21y","21z"]
basis_args = {'atoms':[0,0],
'Z':{0:6},
'orbs':[orbitals,orbitals],
'pos':basis_positions,
'spin':spin_args}
#### DEFINE HAMILTONIAN ####
SK = {"020":-8.81,"021":-0.44, #onsite energies
"002200S":-5.279, #nearest-neighbour Vssσ
"002201S":5.618, #nearest-neighbour Vspσ
"002211S":6.05,"002211P":-3.07} #nearest-neighbour Vppσ,Vppπ
hamiltonian_args = {'type':'SK',
'V':SK,
'avec':avec,
'cutoff':alatt*0.7,
'spin':spin_args}
#### DEFINE MOMENTUM PATH ####
G = np.array([0,0,0]) #gamma point
K = np.array([1./3,2./3,0]) #BZ corner for graphene
M = np.array([0,0.5,0.0]) #BZ edge centre
momentum_args= {'type':'F',
'avec':avec,
'grain':200,
'pts':[G,K,M,G],
'labels':['$\\Gamma$','K','M','$\\Gamma$']}
#### CONSTRUCT MODEL ####
basis = build_lib.gen_basis(basis_args)
kpath = build_lib.gen_K(momentum_args)
TB = build_lib.gen_TB(basis,hamiltonian_args,kpath)
return TB,kpath
def do_fatbands(TB,projections):
'''
Calculate the orbitally-projected bandstructure, for a series of
orbital projections
*args*:
- **TB**: tight-binding object
- **projections**: list of lists of int, e.g. [[0],[1,2],[2,4,6]]
'''
for pi in range(len(projections)):
op_lib.fatbs(projections[pi],TB,Elims=(TB.Eband.min()*1.1,TB.Eband.max()*1.1))
def setup_arpes(TB,Kpt,klimit=0.1,Elimits=[-2,0.2],Npoints=100):
'''
Initialize an ARPES experiment over a 2D momentum mesh.
*args*:
- **TB**: tight-binding object
- **Kpt**: iterable, length 3 of float, indicating centre of calculation
- **klimit**: float, range of momentum in 1/A about centre Kpt
- **Elimits**: iterable, length 2 of float, indicating energy range of interest, in eV
- **Npoints**: int, number of k-points along each dimension
*return*:
- **experiment**: experiment object, with matrix elements computed
'''
arpes_args={'cube':{'X':[Kpt[0]-klimit,Kpt[0]+klimit,Npoints],
'Y':[Kpt[1]-klimit,Kpt[1]+klimit,Npoints],
'kz':Kpt[2],
'E':[Elimits[0],Elimits[1],1000]},
'SE':['poly',0.01,0,0.1],
'hv': 21.2,
'pol':np.array([-1,0,1]),
'resolution':{'E':0.02,'k':0.005},
'T':4.2}
experiment = arpes_lib.experiment(TB,arpes_args)
experiment.datacube()
return experiment
def plot_wavefunction(TB,band_index,k_index,nangles=20):
'''
Plot orbital projection of eigenvector, assumes already diagonalized
*args*:
- **TB**: tight-binding object
- **band_index**: int, index of band zero-based from low-to-high energy
- **k_index**: int, index of momentum point in the K-path
*kwargs*:
- **nangles**: int, optional, number of points in angular mesh for orbital plotting
'''
wavefunction = oplot.wavefunction(basis=TB.basis,vector=TB.Evec[k_index,:,band_index])
_ = wavefunction.triangulate_wavefunction(nangles)
def semenoff_mass(TB,mass):
'''
Add Semenoff mass to the Hamiltonian
*args*:
- **TB**: tight-binding model
- **mass**: float, mass term
'''
Hnew = [[0,0,0,0,0,mass/2],
[1,1,0,0,0,-mass/2]]
TB.append_H(Hnew)
def haldane_mass(TB,mass):
'''
Add Haldane terms to the Hamiltonian
*args*:
- **TB**: tight-binding model
- **mass**: float, mass term
'''
Hnew = []
vectors = [TB.avec[0],TB.avec[1],TB.avec[1]-TB.avec[0]]
for ii in range(2):
for jj in range(3):
Hnew.append([ii,ii,*vectors[jj],-(2*ii-1)*0.5j*mass])
Hnew.append([ii,ii,*(-vectors[jj]),(2*ii-1)*0.5j*mass])
TB.append_H(Hnew)
``` |
{
"source": "jminar/crispy",
"score": 2
} |
#### File: crispy/crispy/config.py
```python
import logging
import os
import sys
from packaging.version import parse
from PyQt5.QtCore import QSettings, QStandardPaths
from crispy import version, resourceAbsolutePath
logger = logging.getLogger(__name__)
class Config:
@property
def name(self):
return "Crispy" if sys.platform == "win32" else "crispy"
@property
def path(self):
return os.path.split(self.settings.fileName())[0]
@property
def settings(self):
settings = QSettings(
QSettings.IniFormat, QSettings.UserScope, self.name, "settings"
)
# Set default values if the config file is empty or was not created.
if not settings.allKeys():
logger.debug("Loading default settings.")
settings.beginGroup("Quanty")
settings.setValue("Path", self.findQuanty())
settings.setValue("Verbosity", "0x0000")
settings.setValue("DenseBorder", "2000")
settings.setValue("ShiftSpectra", True)
settings.setValue("RemoveFiles", True)
settings.endGroup()
settings.setValue("CheckForUpdates", True)
settings.setValue("CurrentPath", os.path.expanduser("~"))
settings.setValue("Version", version)
settings.sync()
return settings
def read(self):
return self.settings
def removeOldFiles(self):
"""Function that removes the settings from previous versions."""
# This is the very first way settings were stored.
root = QStandardPaths.standardLocations(QStandardPaths.GenericConfigLocation)[0]
path = os.path.join(root, self.name)
if parse(version) < parse("0.7.0"):
try:
os.remove(os.path.join(path, "settings.json"))
os.rmdir(path)
logger.debug("Removed old configuration file.")
except (IOError, OSError):
pass
# Fix a naming error in version 2020.1rc0.
if parse("2020.1rc0") <= parse(version):
root, _ = os.path.split(self.settings.fileName())
try:
os.remove(os.path.join(root, "settings-new.ini"))
logger.debug("Removed old configuration file.")
except (IOError, OSError):
pass
@staticmethod
def findQuanty():
if sys.platform == "win32":
executable = "Quanty.exe"
localPath = resourceAbsolutePath(os.path.join("quanty", "bin", "win32"))
elif sys.platform == "darwin":
executable = "Quanty"
localPath = resourceAbsolutePath(os.path.join("quanty", "bin", "darwin"))
else:
localPath = None
executable = "Quanty"
envPath = QStandardPaths.findExecutable(executable)
if localPath is not None:
localPath = QStandardPaths.findExecutable(executable, [localPath])
# Check if Quanty is in the paths defined in the $PATH.
if envPath:
path = envPath
# Check if Quanty is bundled with Crispy.
elif localPath is not None:
path = localPath
else:
path = None
if path is None:
logger.debug(
"Could not find the Quanty executable."
'Please set it up using the "Preferences" dialog.'
)
return path
def setQuantyPath(self, path):
self.settings.setValue("Quanty/Path", path)
self.settings.sync()
```
#### File: gui/quanty/details.py
```python
import os
from PyQt5.QtCore import QPoint, QSize
from PyQt5.QtWidgets import QDialog, QWidget
from PyQt5.uic import loadUi
from crispy import resourceAbsolutePath
from crispy.config import Config
from crispy.gui.utils import fixedFont, setMappings
settings = Config().read()
class AxisWidget(QWidget):
def __init__(self, parent=None):
super().__init__(parent=parent)
uiPath = os.path.join("gui", "uis", "quanty", "details", "axis.ui")
loadUi(resourceAbsolutePath(uiPath), baseinstance=self, package="crispy.gui")
self.mappers = list()
def clear(self):
if self.mappers:
for mapper in self.mappers:
mapper.clearMapping()
self.shiftLineEdit.clear()
self.gaussianLineEdit.clear()
self.lorentzianLineEdit.clear()
def populate(self, axis):
self.clear()
MAPPINGS = (
(self.shiftLineEdit, axis.shift),
(self.lorentzianLineEdit, axis.lorentzian),
(self.gaussianLineEdit, axis.gaussian),
)
self.mappers = setMappings(MAPPINGS)
class DetailsDialog(QDialog):
def __init__(self, parent=None):
super().__init__(parent=parent)
uiPath = os.path.join("gui", "uis", "quanty", "details", "main.ui")
loadUi(resourceAbsolutePath(uiPath), baseinstance=self, package="crispy.gui")
font = fixedFont()
self.inputText.setFont(font)
self.outputText.setFont(font)
# self.summaryText.setFont(font)
self.xAxis = AxisWidget()
self.yAxis = AxisWidget()
self.axesTabWidget.addTab(self.xAxis, None)
self.mappers = list()
# This avoids closing the window after changing the value in a line
# edit and then pressing return.
self.closePushButton.setAutoDefault(False)
self.closePushButton.clicked.connect(self.close)
def clear(self):
self.setWindowTitle("Details")
if self.mappers:
for mapper in self.mappers:
mapper.clearMapping()
self.scaleLineEdit.clear()
self.normalizationComboBox.clear()
self.xAxis.clear()
self.yAxis.clear()
self.inputText.clear()
self.outputText.clear()
# self.summaryText.clear()
def populate(self, result):
self.clear()
if result is None:
return
MAPPINGS = (
(self.scaleLineEdit, result.axes.scale),
(self.normalizationComboBox, result.axes.normalization),
)
self.mappers = setMappings(MAPPINGS)
self.xAxis.populate(result.axes.xaxis)
self.axesTabWidget.setTabText(0, result.axes.xaxis.label)
if result.experiment.isTwoDimensional:
self.axesTabWidget.addTab(self.yAxis, None)
self.axesTabWidget.setTabText(1, result.axes.yaxis.label)
self.yAxis.populate(result.axes.yaxis)
else:
self.axesTabWidget.removeTab(1)
model = result.model()
self.spectraView.setModel(model)
index = model.indexFromItem(result.spectra.toPlot)
self.spectraView.setRootIndex(index)
self.inputText.setPlainText(result.input)
self.outputText.setPlainText(result.output)
# self.summaryText.setPlainText(result.summary)
if result.value is not None:
title = f"Details for {result.value}"
self.setWindowTitle(title)
def showEvent(self, event):
self.loadSettings()
super().showEvent(event)
def closeEvent(self, event):
self.saveSettings()
super().closeEvent(event)
def loadSettings(self):
settings.beginGroup("DetailsDialog")
size = settings.value("Size")
if size is not None:
self.resize(QSize(size))
pos = settings.value("Position")
if pos is not None:
self.move(QPoint(pos))
settings.endGroup()
def saveSettings(self):
settings.beginGroup("DetailsDialog")
settings.setValue("Size", self.size())
settings.setValue("Position", self.pos())
settings.endGroup()
settings.sync()
``` |
{
"source": "jminardi/RobotBrain",
"score": 3
} |
#### File: servers/car_server/car_server.py
```python
import socket
import threading
import time
from motor import Motor
from servo import Servo
class CarServer(object):
def __init__(self, motor_pins=(24, 25), servo_pin=0, port=2012):
self.port = port
# The motor and servo for driving
self.motor = Motor(*motor_pins)
self.servo = Servo(servo_pin)
        # The most recent coordinates from the accelerometer
self.coords = (0, 0, 0)
# Whether or not to continue running the server
self._run = True
self.start()
def start(self):
""" Initialize and start threads. """
self._server_thread = threading.Thread(target=self._server_worker)
self._server_thread.start()
self._control_thread = threading.Thread(target=self._control_worker)
self._control_thread.start()
def stop(self):
""" Shut down server and control threads. """
self._run = False
def _server_worker(self):
HOST = '' # Symbolic name meaning all available interfaces
PORT = self.port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connected by', addr
while self._run:
data = conn.recv(1024)
if data:
coords = data[1:].split(',')
x, y, z = [float(n) for n in coords]
self.coords = (x, y, z)
conn.sendall(data)
conn.close()
def _control_worker(self):
while self._run:
x, y, z = self.coords
forward_speed = -y/10
turning_power = (x+10)/20
self.motor.drive(forward_speed)
self.servo.set(turning_power)
```
#### File: servers/sensor_server/io_controller.py
```python
import numpy as np
from matplotlib.pyplot import Figure
from mpl_toolkits.mplot3d import Axes3D
from enable.api import Component
from traits.api import (HasStrictTraits, Int, Float, Instance, Any, Dict,
on_trait_change, Set, List, NO_COMPARE)
from chaco.api import Plot, ArrayPlotData
from links_component import LinksComponent
# Map of input names and the amount needed to normalize them
INPUT_MAP = [('potentiometer', 1024.0),
('distance', 100.0),
('switch', 1),
('acc_z', 1024.0),
('acc_y', 1024.0),
('acc_x', 1024.0)]
OUTPUT_MAP = ['motor', 'servo', 'led']
class IOController(HasStrictTraits):
### Current Sensor Values ################################################
acc_x = Float(plot_data=True, comparison_mode=NO_COMPARE)
acc_y = Float(plot_data=True, comparison_mode=NO_COMPARE)
acc_z = Float(plot_data=True, comparison_mode=NO_COMPARE)
switch = Float(plot_data=True, comparison_mode=NO_COMPARE)
distance = Float(plot_data=True, comparison_mode=NO_COMPARE)
potentiometer = Float(plot_data=True, comparison_mode=NO_COMPARE)
### Plots ################################################################
logo_plot = Instance(Figure)
acc_x_plot = Instance(Plot)
acc_y_plot = Instance(Plot)
acc_z_plot = Instance(Plot)
switch_plot = Instance(Plot)
distance_plot = Instance(Plot)
pot_plot = Instance(Plot)
link_plot = Instance(Component)
plot_data = Instance(ArrayPlotData)
line = Any()
ax = Any()
### Outputs ##############################################################
led = Int(output=True)
servo = Int(output=True)
motor = Int(output=True)
### IOController Interface ###############################################
added_links = List()
removed_links = List()
outputs = Dict()
### Private Traits #######################################################
_current_links = Set()
### Trait Defaults #######################################################
def _logo_plot_default(self):
fig = Figure()
ax = Axes3D(fig)
line, = ax.plot((1, 2), (1, 2), (1, 2))
self.line = line
self.ax = ax
self.ax.set_xlim(0, 1, auto=False)
self.ax.set_ylim(0, 1, auto=False)
self.ax.set_zlim(0, 1, auto=False)
return fig
def _acc_x_plot_default(self):
plot = Plot(self.plot_data)
plot.plot(('acc_x',))
plot.padding = (0, 0, 0, 0)
plot.value_mapper.range.low_setting = 0
plot.value_mapper.range.high_setting = 1
return plot
def _acc_y_plot_default(self):
plot = Plot(self.plot_data)
plot.plot(('acc_y',))
plot.padding = (0, 0, 0, 0)
plot.value_mapper.range.low_setting = 0
plot.value_mapper.range.high_setting = 1
return plot
def _acc_z_plot_default(self):
plot = Plot(self.plot_data)
plot.plot(('acc_z',))
plot.padding = (0, 0, 0, 0)
plot.value_mapper.range.low_setting = 0
plot.value_mapper.range.high_setting = 1
return plot
def _switch_plot_default(self):
plot = Plot(self.plot_data)
plot.plot(('switch',))
plot.padding = (0, 0, 0, 0)
plot.value_mapper.range.low_setting = 0
plot.value_mapper.range.high_setting = 1
return plot
def _distance_plot_default(self):
plot = Plot(self.plot_data)
plot.plot(('distance',))
plot.padding = (0, 0, 0, 0)
plot.value_mapper.range.low_setting = 0
plot.value_mapper.range.high_setting = 1
return plot
def _pot_plot_default(self):
plot = Plot(self.plot_data)
plot.plot(('potentiometer',))
plot.padding = (0, 0, 0, 0)
plot.value_mapper.range.low_setting = 0
plot.value_mapper.range.high_setting = 1
return plot
def _link_plot_default(self):
return LinksComponent()
def _plot_data_default(self):
plot_data = ArrayPlotData()
plot_data.set_data('distance', np.zeros(50))
plot_data.set_data('potentiometer', np.zeros(50))
plot_data.set_data('switch', np.zeros(50))
plot_data.set_data('acc_x', np.zeros(50))
plot_data.set_data('acc_y', np.zeros(50))
plot_data.set_data('acc_z', np.zeros(50))
return plot_data
def clicked(self, win):
import ipdb
ipdb.set_trace() # XXX BREAKPOINT
### Trait Change Handlers ################################################
@on_trait_change('acc_x, acc_y, acc_z')
def _update_3d_plot(self):
if self.line and self.ax and self.ax.figure.canvas:
x, y, z = self.acc_x, self.acc_y, self.acc_z
#self.line.set_data(np.array([[0, 0, 0], [x, y, z]]).T)
data = np.array([[.5, .5, .5], [x, y, z]]).T
self.line.set_data(data[0:2, :])
self.line.set_3d_properties(data[2, :])
self.ax.figure.canvas.draw()
#print x, y, z
#self.ax.clear()
#self.ax.plot((0, x), (0, y), (0, z))
#self.ax.set_xlim(0, 1, auto=False)
#self.ax.set_ylim(0, 1, auto=False)
#self.ax.set_zlim(0, 1, auto=False)
#self.ax.figure.canvas.draw()
@on_trait_change('+plot_data')
def _push_to_plot_data(self, name, new):
# XXX This is causing NSConcreteMapTable to leak
ary = self.plot_data[name]
if ary is not None:
ary = np.append(ary, new)
ary = ary[-50:]
self.plot_data.set_data(name, ary)
@on_trait_change('+output')
def _push_to_server(self, name, new):
self.outputs[name] = new
print self.outputs
@on_trait_change('link_plot.links[]')
def _links_changed(self, new):
new = set(new)
old = self._current_links
added = new - old
added_links = []
for i, out in added:
added_links.append((INPUT_MAP[i], OUTPUT_MAP[out]))
removed = old - new
removed_links = []
for i, out in removed:
removed_links.append((INPUT_MAP[i], OUTPUT_MAP[out]))
self._current_links = new
self.added_links.extend(added_links)
self.removed_links.extend(removed_links)
print added, removed
```
#### File: servers/sensor_server/sensor_client.py
```python
import json
import sys
import threading
import enaml
import zmq
from io_controller import IOController
class SensorApp(object):
#def __init__(self, ip='192.168.43.48', port=2019):
def __init__(self, ip='192.168.1.80', port=2024):
self.ip = ip
self.port = port
self._run = True
self.io_controller = IOController()
self.start()
def start(self):
self._sensor_client_thread = threading.Thread(
target=self._sensor_client_worker)
self._sensor_client_thread.start()
def stop(self):
self._run = False
def _sensor_client_worker(self):
## XXX Mock sensor values
#import numpy as np
#while self._run:
# updates = {'acc_x': int(np.random.random() * 1024),
# 'acc_y': int(np.random.random() * 1024),
# 'acc_z': int(np.random.random() * 1024),
# 'switch': np.random.random() * 360,
# 'distance': np.random.random() * 90,
# 'potentiometer': np.random.random()}
# self.io_controller.set(**updates)
# import time
# time.sleep(.1)
context = zmq.Context()
# Socket to talk to server
socket = context.socket(zmq.REQ)
socket.connect("tcp://{}:{}".format(self.ip, self.port))
while self._run:
send = {}
if self.io_controller.added_links:
add = self.io_controller.added_links
self.io_controller.added_links = []
send['add_link'] = add
if self.io_controller.removed_links:
remove = self.io_controller.removed_links
self.io_controller.removed_links = []
send['remove_link'] = remove
if self.io_controller.outputs:
outputs = self.io_controller.outputs
self.io_controller.outputs = {}
send['out'] = outputs
socket.send(json.dumps(send))
message = socket.recv()
self.io_controller.set(**json.loads(message))
socket.close()
if __name__ == '__main__':
from enaml.stdlib.sessions import show_simple_view
with enaml.imports():
from sensor_view import SensorViewWindow
ip, port = sys.argv[1].split(':')
sensor_app = SensorApp(ip=ip, port=port)
window = SensorViewWindow(io_controller=sensor_app.io_controller)
show_simple_view(window)
sensor_app.stop()
``` |
{
"source": "Jminding/Guessing-Game",
"score": 4
} |
#### File: Jminding/Guessing-Game/Guessing Game.py
```python
from tkinter import *
# For some reason, importing tkinter doesn't import messagebox in Python 3.6+
from tkinter import messagebox
from tkinter import simpledialog
guessList = []
class GuessNumber(Frame):
'''represents a board of guess your number'''
def __init__(self,master,lowerRange,upperRange):
        '''GuessNumber(master,lowerRange,upperRange)
        creates a guessing number button board'''
Frame.__init__(self,master)
self.grid()
self.lowerRange = lowerRange
self.upperRange = upperRange
self.computerGuess = (self.lowerRange + self.upperRange)//2
self.endgame = None
Label(self,text="My Guess").grid(row=0,column=0)
Label(self,text=self.computerGuess).grid(row=2,column=0)
Label(self,text="Result").grid(row=0,column=1)
self.tooHighButton = Button(self,text="Too High",command=self.too_high).grid(row=1,column=1)
self.tooLowButton = Button(self,text="Too Low",command=self.too_low).grid(row=3,column=1)
self.correctButton = Button(self,text="Correct!",command=self.correct).grid(row=2,column=1)
def too_high(self):
'''if answer is too high'''
guessList.append(self.computerGuess)
self.upperRange = self.computerGuess
return GuessNumber(root,self.lowerRange,self.computerGuess)
def too_low(self):
'''if answer is too low'''
guessList.append(self.computerGuess)
self.lowerRange = self.computerGuess
return GuessNumber(root,self.computerGuess,self.upperRange)
def correct(self):
'''if the answer is correct'''
guessList.append(self.computerGuess)
if messagebox.showinfo('Thinking of your number',"Yay -- I got it! It took me " + str(len(guessList)) + " tries. Here's my full list of guesses: " + str(guessList) + ". YAY",parent=self):
self.destroy()
self.quit()
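# Note: the strategy above is binary search -- each guess halves the remaining
# interval, so at most about log2(upperRange-lowerRange) guesses are needed,
# e.g. roughly 7 guesses for a 1..100 range.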
root = Tk()
def play():
lowerRange = int(simpledialog.askstring(title="Lower Bound", prompt="Lower Bound:"))
upperRange = int(simpledialog.askstring(title="Upper Bound", prompt="Upper Bound:"))
root.title('Guess')
guess = GuessNumber(root,lowerRange,upperRange)
guess.mainloop()
play()
``` |
{
"source": "jmineraud/rust-multiple-sdk-with-callbacks",
"score": 2
} |
#### File: rust-multiple-sdk-with-callbacks/python-sdk/ping_pong_sdk.py
```python
import sys, ctypes
import os.path
from ctypes import c_char_p, c_uint32, Structure, POINTER, CFUNCTYPE
# Define a type for our callback
CBFUNC = CFUNCTYPE(None, c_uint32) # Callback takes an unsigned int as its parameter and does not return anything
# Define a place holder for the structure
# This will only be used in conjunction with the POINTER method, which creates a new type as a pointer to an existing one.
class PingPongS(Structure):
pass
current_dir = os.path.dirname(__file__)
lib_dir = os.path.abspath(os.path.join(current_dir, '..', 'rust-lib', 'target', 'release'))
prefix = {'win32': ''}.get(sys.platform, 'lib') # add prefix lib to all but windows
extension = {'darwin': '.dylib', 'win32': '.dll'}.get(sys.platform, '.so') # extension is .so for linux, .dylib for OSX and .dll for windows
lib_name = os.path.join(lib_dir, prefix + "mylib" + extension)
lib = ctypes.cdll.LoadLibrary(lib_name) # Load the library
lib.hello_world.restype = c_char_p # Returns the hello + args string
lib.hello_world.argtypes = (c_char_p, ) # Takes a bytes string as argument ('argtypes' is the ctypes attribute; '.args' in the original was silently ignored)
lib.ping_pong_new.restype = POINTER(PingPongS) # Return a pointer to self
lib.ping_pong_new.argtypes = (c_uint32, c_uint32, ) # Takes start and trigger input
lib.ping_pong_free.argtypes = (POINTER(PingPongS), ) # Equivalent to self
lib.ping_pong_set_callback.argtypes = (POINTER(PingPongS), CBFUNC) # Equivalent to self, Callback
lib.ping_pong_ping.argtypes = (POINTER(PingPongS), ) # Equivalent to self
def hello(to):
return lib.hello_world(str.encode(to)).decode()
class PingPong:
def __init__(self, start, trigger):
self.obj = lib.ping_pong_new(start, trigger)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
lib.ping_pong_free(self.obj)
    def set_callback(self, callback):
        # Keep a reference to the CFUNCTYPE wrapper on self; if it were passed
        # anonymously it could be garbage-collected while Rust still holds the pointer.
        self._cb = CBFUNC(callback)
        lib.ping_pong_set_callback(self.obj, self._cb)
def ping(self):
lib.ping_pong_ping(self.obj)
# Then we can test the library
print(hello("from the Rust native library called by the Python SDK"))
start_value = 0
trigger_value = 3
number_of_pings = 11
with PingPong(start_value, trigger_value) as pp:
triggered_for_values = []
def cb_(val):
triggered_for_values.append(val)
pp.set_callback(cb_)
for _ in range(number_of_pings):
pp.ping()
print("With start at {}, trigger at {} and {} number of pings, here are the values that produced a trigger -> {}".format(
start_value, trigger_value, number_of_pings, triggered_for_values))
``` |
{
"source": "jmineroff/MovieGenrePrediction",
"score": 3
} |
#### File: src/utils/initialize.py
```python
import urllib
import requests
import json
import imdb
import time
import itertools
import wget
import os
import tmdbsimple as tmdb
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
api_key = os.environ['TMDB_API_KEY'] #Set the TMDB_API_KEY environment variable to your own API key to run the code below.
# Generate your own API key as explained above :)
tmdb.API_KEY = api_key #This sets the API key setting for the tmdb object
search = tmdb.Search() #this instantiates a tmdb "search" object which allows your to search for the movie
# Should be a command line arg
# set here the path where you want the scraped folders to be saved!
poster_folder = 'posters_final/' # assumed default; this variable was left undefined in the original
# These functions take in a string movie name i.e. like "The Matrix" or "Interstellar"
# What they return is pretty much clear in the name - Poster, ID , Info or genre of the Movie!
def grab_poster_tmdb(movie):
    response = search.movie(query=movie)
    id=response['results'][0]['id']
    movie = tmdb.Movies(id)
    title=movie.info()['original_title']
    title='_'.join(title.split(' '))
    # 'strcmd' was undefined in the original; reconstructed here (assumed intent):
    # download the poster image from TMDB into poster_folder via wget.
    posterpath = movie.info()['poster_path']
    strcmd = 'wget -O ' + poster_folder + title + '.jpg https://image.tmdb.org/t/p/original' + posterpath
    os.system(strcmd)
def get_movie_id_tmdb(movie):
response = search.movie(query=movie)
movie_id=response['results'][0]['id']
return movie_id
def get_movie_info_tmdb(movie):
response = search.movie(query=movie)
id=response['results'][0]['id']
movie = tmdb.Movies(id)
info=movie.info()
return info
def get_movie_genres_tmdb(movie):
response = search.movie(query=movie)
id=response['results'][0]['id']
movie = tmdb.Movies(id)
genres=movie.info()['genres']
return genres
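# Usage sketch (requires a valid TMDB_API_KEY and network access; results come
# from the live TMDB database):
#   movie_id = get_movie_id_tmdb("The Matrix")
#   genre_names = [g['name'] for g in get_movie_genres_tmdb("The Matrix")]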
``` |
{
"source": "jming422/advent-of-code-2021",
"score": 3
} |
#### File: advent-of-code-2021/python/main.py
```python
import solvers.dayseven
def read_input(i):
with open(f"../puzzles/{i}-input.txt") as f:
return f.readlines()
PUZZLE = 7
def main():
lines = read_input(PUZZLE)
out = solvers.dayseven.part_two(lines)
print(out)
if __name__ == "__main__":
main()
```
#### File: python/solvers/dayfive.py
```python
def is_horiz_or_vert(line):
return line[0]["x"] == line[1]["x"] or line[0]["y"] == line[1]["y"]
def parse(input_line):
return [
{"x": int(x), "y": int(y)}
for x, y in [s.strip().split(",") for s in input_line.split("->")]
]
def do_it(input_lines, diagonals=False):
lines = [
line
for line in [parse(input_line) for input_line in input_lines]
if diagonals or is_horiz_or_vert(line)
]
max_x = max(point["x"] for line in lines for point in line)
max_y = max(point["y"] for line in lines for point in line)
plot = [[0 for _ in range(max_x + 1)] for _ in range(max_y + 1)]
for a, b in lines:
cur_x = a["x"]
cur_y = a["y"]
dest_x = b["x"]
dest_y = b["y"]
plot[cur_y][cur_x] += 1
while cur_x - dest_x != 0 or cur_y - dest_y != 0:
if cur_x < dest_x:
cur_x += 1
elif cur_x > dest_x:
cur_x -= 1
if cur_y < dest_y:
cur_y += 1
elif cur_y > dest_y:
cur_y -= 1
plot[cur_y][cur_x] += 1
return sum(1 for row in plot for point in row if point > 1)
def part_one(lines):
return do_it(lines)
def part_two(lines):
return do_it(lines, True)
```
#### File: python/solvers/daysix.py
```python
def part_one(lines):
fish = [int(f) for f in lines[0].split(",")]
for _ in range(80):
fish = [f - 1 for f in fish]
for i, f in enumerate(fish):
if f < 0:
fish[i] = 6
fish.append(8)
return len(fish)
# The solution from part_one takes WAY too much time & memory; it will
# practically never finish! So we have to do something cleverer. I got some
# help on this one from my brilliant wife and from
# https://barretblake.dev/blog/2021/12/advent-of-code-day6/, but the code
# itself I still wrote without copying.
def part_two(lines):
# Indices 0-8 are the fish states, values are the number of fish in each
# state
fish = [0] * 9
for f in lines[0].split(","):
fish[int(f)] += 1
for _ in range(256):
# fish.pop(0) will shift all the values left one index for us!
reproducing_fish = fish.pop(0)
fish[6] += reproducing_fish
fish.append(reproducing_fish)
return sum(fish)
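# One-day trace of the counting trick above: with fish = [2,1,0,0,0,0,0,0,0]
# (two fish at timer 0, one at timer 1), fish.pop(0) shifts everything left and
# returns 2; then fish[6] += 2 and fish.append(2) give [1,0,0,0,0,0,2,0,2]:
# the two parents reset to timer 6 and spawn two newborns at timer 8.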
``` |
{
"source": "jminjares4/Rasperry-Pi-Pico-Micropython",
"score": 4
} |
#### File: Rasperry-Pi-Pico-Micropython/General Purpose Input and Output (GPIO)/traffic_light_controller.py
```python
import machine
import utime
import _thread
# set leds as output, pin 13-15
led_red = machine.Pin(15, machine.Pin.OUT)
led_amber = machine.Pin(14, machine.Pin.OUT)
led_green = machine.Pin(13, machine.Pin.OUT)
# set button as input, pin 16
button = machine.Pin(16, machine.Pin.IN)
# set buzzer as output, pin 12
buzzer = machine.Pin(12, machine.Pin.OUT)
# create global variable
global button_pressed
button_pressed = False # set to false
# button function
def button_reader_thread():
global button_pressed # recall global variable
while True: # loop
if button.value() == 1: # check if button has been pressed
button_pressed = True # update global variable
# create a thread for the button
_thread.start_new_thread(button_reader_thread, ())
while True:
if button_pressed == True: # check if the button has been pressed
        led_red.value(1) # turn on the red LED
for i in range(10): # iterate
buzzer.value(1) # turn on buzzer
utime.sleep(0.2) # 0.2 second
buzzer.value(0) # turn off buzzer
utime.sleep(0.2) # 0.2 second
global button_pressed # recall global variable
button_pressed = False # update global variable
    # run the standard traffic light sequence
led_red.value(1)
utime.sleep(5)
led_amber.value(1)
utime.sleep(2)
led_red.value(0)
led_amber.value(0)
led_green.value(1)
utime.sleep(5)
led_green.value(0)
led_amber.value(1)
utime.sleep(5)
led_amber.value(0)
``` |
{
"source": "jminor/omgifol",
"score": 3
} |
#### File: omgifol/demo/mirror.py
```python
import sys
# local
from omg import wad, mapedit, util
def mirror(map):
ed = mapedit.MapEditor(map)
for v in ed.vertexes:
v.x = -v.x
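    # mirroring flips each line's winding, so swap linedef endpoints to
    # keep the front/back sidedefs on the correct sides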
for l in ed.linedefs:
l.vx_a, l.vx_b = l.vx_b, l.vx_a
for t in ed.things:
t.x = -t.x
t.angle = (180 - t.angle) % 360
ed.nodes.data = ""
return ed.to_lumps()
def main(args):
    if len(args) < 2:
        print(" Omgifol script: mirror maps\n")
        print(" Usage:")
        print(" mirror.py input.wad output.wad [pattern]\n")
        print(" Mirror all maps or those whose name match the given pattern")
        print(" (eg E?M4 or MAP*).")
        print(" Note: nodes will have to be rebuilt externally.\n")
    else:
        print("Loading %s..." % args[0])
        inwad = wad.WAD()
        outwad = wad.WAD()
        inwad.from_file(args[0])
        pattern = "*"
        if len(args) == 3:
            pattern = args[2]
        for name in util.find(inwad.maps, pattern):
            print("Mirroring %s" % name)
            outwad.maps[name] = mirror(inwad.maps[name])
        print("Saving %s..." % args[1])
        outwad.to_file(args[1])
if __name__ == "__main__":
    main(sys.argv[1:])
```
#### File: omgifol/demo/wad2obj.py
```python
__doc__ = """
Extracts textures and map geometry from a WAD file into an OBJ file,
MTL file and PNG files suitable for use in any 3D modeling program or
modern game engine.
"""
# python
import math, argparse, os, sys
# PIL
from PIL import Image
# local
from omg import txdef, wad, mapedit, util
# Constants
DEFAULT_MTL_TEXT = """Ka 1.000000 1.000000 1.000000
Kd 1.000000 1.000000 1.000000
Ks 0.000000 0.000000 0.000000
Tr 1.000000
illum 1
Ns 0.000000
"""
def linked_a_chain_from(chains, remaining_segments):
for chain in chains:
end = chain[-1]
# compare the actual coordinates of the start of this segment (segment)
# versus the end of the chain (end)
for segment in (s for s in remaining_segments if s[1] == end[0]):
# they match, so extend this chain
chain.append(segment)
remaining_segments.remove(segment)
return True
return False
class Polygon:
"""
Not really a polygon. Actually a set of faces that share a texture.
This is used for floor/ceilings of sectors, which may have disjoint
polygons. Also used for wall segments which are actually simple polygons.
"""
def __init__(self, texture=None):
self.vertices = []
self.segments = []
self.texture = texture
self.faces = []
self.textureCoords = []
def getFaces(self):
return self.faces
def getTextureCoords(self):
return self.textureCoords
def addFace(self, face, textureCoords):
self.faces.append(face)
self.textureCoords.append(textureCoords)
def addSegment(self, p1, p2, a, b):
"""
Feed us one line segment at a time, then call combineSegments()
"""
self.segments.append((p1,p2,a,b))
def combineSegments(self):
"""
Take all line segments we were given and try to combine them into faces.
"""
remaining_segments = list(self.segments)
if not remaining_segments:
return []
chains = []
# @TODO: Why is count computed this way?
max_count = len(remaining_segments) * 2
count = 0
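        # the count/max_count cap appears to be a safety guard: each
        # successful link consumes one segment, so well-formed geometry
        # finishes early and degenerate input cannot loop forever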
while remaining_segments and count < max_count:
if chains and linked_a_chain_from(chains, remaining_segments):
count += 1
continue
chains.append([remaining_segments.pop()])
# grab the vertex indicies for each chain (aka face)
newFaces = [[segment[2] for segment in chain] for chain in chains]
self.faces.extend(newFaces)
# lets compute some textureCoords for these new faces
# based on their vertex coords in world space.
# this works well for floors and ceilings.
# flats are always 64x64 aligned to world coords
        for chain in chains:
            self.textureCoords.append(
                [(segment[0].x/64., segment[0].y/64.) for segment in chain])
def objmap(wad, name, filename, textureNames, textureSizes, centerVerts):
edit = mapedit.MapEditor(wad.maps[name])
# first lets get into the proper coordinate system
v = edit.vertexes[0]
bb_min = mapedit.Vertex(v.x,v.y)
bb_max = mapedit.Vertex(v.x,v.y)
    for v in edit.vertexes:
        v.x = -v.x
        # track the bounding box of the mirrored vertices
        if bb_max.x < v.x: bb_max.x = v.x
        if bb_max.y < v.y: bb_max.y = v.y
        if bb_min.x > v.x: bb_min.x = v.x
        if bb_min.y > v.y: bb_min.y = v.y
if centerVerts:
center = mapedit.Vertex((bb_min.x+bb_max.x)/2, (bb_min.y+bb_max.y)/2)
else:
center = mapedit.Vertex(0,0)
vi = 1 # vertex index (starting at 1 for the 1st vertex)
vertexes = []
polys = []
_sectors_with_floor_and_ceil_added(edit.sectors)
_polygons_with_line_definitions(edit, vi, vertexes, textureSizes, polys)
for sector in edit.sectors:
for poly in (sector.floor, sector.ceil):
poly.combineSegments()
polys.append(poly)
ti = 1 # vertex texture index (starting at 1 for the 1st "vt" statement)
with open(filename, "w") as out:
out.write("# %s\n" % name)
out.write("mtllib doom.mtl\n")
out.write("o %s\n" % name)
# here are all the vertices - in order, so you can index them (starting
# at 1) note that we stretch them to compensate for Doom's non-square
# pixel display
for v in vertexes:
out.write("v %g %g %g\n" % (v[0]-center.x, v[1]*1.2, v[2]-center.y))
for polyindex, poly in enumerate(polys):
if not poly.texture:
                print("Polygon with no texture?", poly)
continue
if poly.texture == '-' or poly.texture == 'F_SKY1':
# this was not meant to be rendered
continue
# polyindex starts at 1, enumerate starts at 0
out.write("g %s.%d %s\n" % (poly.texture, polyindex + 1, name))
texture_name = poly.texture
if poly.texture not in textureNames:
                print("Missing texture", poly.texture)
                texture_name = "None"
out.write("usemtl %s\n" % texture_name)
for vindexes,textureCoords in zip(
poly.getFaces(),
poly.getTextureCoords()):
tindexes = []
for u,v in textureCoords:
out.write("vt %g %g\n" % (u, v))
tindexes.append(ti)
ti += 1
out.write(
"f %s\n" % " ".join([
"%s/%s" % (v,t) for v,t in zip(vindexes,tindexes)]))
def _polygons_with_line_definitions(edit, vi, vertexes, textureSizes, polys):
for line in edit.linedefs:
p1 = edit.vertexes[line.vx_a]
p2 = edit.vertexes[line.vx_b]
width = math.sqrt((p1.x-p2.x)*(p1.x-p2.x) + (p1.y-p2.y)*(p1.y-p2.y))
if line.front != -1:
side1 = edit.sidedefs[line.front]
sector1 = edit.sectors[side1.sector]
front_lower_left = vi
front_upper_left = vi+1
front_lower_right = vi+2
front_upper_right = vi+3
vertexes.append((p1.x, sector1.z_floor, p1.y)) # lower left
vertexes.append((p1.x, sector1.z_ceil, p1.y)) # upper left
vertexes.append((p2.x, sector1.z_floor, p2.y)) # lower right
vertexes.append((p2.x, sector1.z_ceil, p2.y)) # upper right
if not line.two_sided and side1.tx_mid!='-': #line.impassable:
polys.append(_poly_from_components(
side1, sector1,
textureSizes, width, line,
front_lower_left, front_lower_right,
front_upper_right, front_upper_left))
sector1.floor.addSegment(
p1, p2, front_lower_left, front_lower_right)
sector1.ceil.addSegment(
p2, p1, front_upper_right, front_upper_left)
vi += 4
if line.back != -1:
side2 = edit.sidedefs[line.back]
sector2 = edit.sectors[side2.sector]
back_lower_left = vi
back_upper_left = vi+1
back_lower_right = vi+2
back_upper_right = vi+3
vertexes.append((p1.x, sector2.z_floor, p1.y)) # lower left
vertexes.append((p1.x, sector2.z_ceil, p1.y)) # upper left
vertexes.append((p2.x, sector2.z_floor, p2.y)) # lower right
vertexes.append((p2.x, sector2.z_ceil, p2.y)) # upper right
if not line.two_sided and side2.tx_mid!='-': #line.impassable:
polys.append(_poly_from_components(
side2, sector2,
textureSizes, width, line,
back_lower_left,back_lower_right,
back_upper_right,back_upper_left))
sector2.floor.addSegment(p2, p1, back_lower_right, back_lower_left)
sector2.ceil.addSegment(p1, p2, back_upper_left, back_upper_right)
vi += 4
if line.front != -1 and line.back != -1 and line.two_sided:
# skip the lower texture if it is '-'
if side1.tx_low != '-':
# floor1 to floor2
poly = Polygon(side1.tx_low)
# the front (sector1) is lower than the back (sector2)
height = sector2.z_floor - sector1.z_floor
tsize = textureSizes.get(side1.tx_low, (64,64))
tw = width/float(tsize[0])
th = height/float(tsize[1])
tx = side1.off_x/float(tsize[0])
if line.lower_unpeg:
ty = (tsize[1]-height-side1.off_y)/float(tsize[1])
else:
ty = -side1.off_y/float(tsize[1])
poly.addFace(
(front_lower_left,front_lower_right,
back_lower_right,back_lower_left),
[(tx,ty),(tw+tx,ty),(tw+tx,th+ty),(tx,th+ty)])
polys.append(poly)
# skip the upper texture if it is '-'
# also skip the upper if the sectors on both sides have sky ceilings
if (
side1.tx_up != '-'
and not (
sector1.tx_ceil == 'F_SKY1'
and sector2.tx_ceil == 'F_SKY1')):
# ceil1 to ceil2
poly = Polygon(side1.tx_up)
# the front (sector1) is higher than the back (sector2)
height = sector1.z_ceil - sector2.z_ceil
tsize = textureSizes[side1.tx_up]
tw = width/float(tsize[0])
th = height/float(tsize[1])
tx = side1.off_x/float(tsize[0])
if line.upper_unpeg:
ty = (tsize[1]-height-side1.off_y)/float(tsize[1])
else:
ty = -side1.off_y/float(tsize[1])
poly.addFace(
(back_upper_left,back_upper_right,
front_upper_right,front_upper_left), \
[(tx,ty),(tw+tx,ty),(tw+tx,th+ty),(tx,th+ty)])
polys.append(poly)
def _poly_from_components(side1, sector1, textureSizes, width, line,
lower_left, lower_right, upper_right, upper_left):
poly = Polygon(side1.tx_mid)
height = sector1.z_ceil - sector1.z_floor
tsize = textureSizes.get(side1.tx_mid, (64,64))
tw = width/float(tsize[0])
th = height/float(tsize[1])
tx = side1.off_x/float(tsize[0])
if line.lower_unpeg: # yes, lower_unpeg applies to the tx_mid also
ty = -side1.off_y/float(tsize[1])
else:
# middle texture is usually top down
ty = (tsize[1]-height-side1.off_y)/float(tsize[1])
poly.addFace(
(lower_left, lower_right, upper_right, upper_left),
[(tx,ty),(tw+tx,ty),(tw+tx,th+ty),(tx,th+ty)])
return poly
def _sectors_with_floor_and_ceil_added(sectors):
for sector in sectors:
sector.floor = Polygon(texture=sector.tx_floor)
sector.ceil = Polygon(texture=sector.tx_ceil)
def writemtl(wad):
out = open("doom.mtl", "w")
out.write("# doom.mtl\n")
names = []
textureSizes = {}
# + wad.patches.items() # + wad.graphics.items() + wad.sprites.items()
textures = wad.flats.items()
for name,texture in textures:
texture.to_file(name+".png")
_texture_written_to(out, name)
names.append(name)
t = txdef.Textures(wad.txdefs)
for name,texture_definition in t.items():
image = Image.new(
'RGB',
(texture_definition.width, texture_definition.height))
# print "making %s at %dx%d" % (name, txdef.width, txdef.height)
for patchdef in texture_definition.patches:
# sometimes there are lower case letters!?
patchdef.name = patchdef.name.upper()
if patchdef.name not in wad.patches:
print ("ERROR: Cannot find patch named '%s' for "
"texture_definition '%s'" % (patchdef.name, name))
continue
patch = wad.patches[patchdef.name]
stamp = patch.to_Image()
image.paste(stamp, (patchdef.x,patchdef.y))
image.save(name+".png")
textureSizes[name] = image.size
_texture_written_to(out, name)
names.append(name)
    out.close()
    return names, textureSizes
def _texture_written_to(out, name):
out.write("\nnewmtl %s\n" % name)
out.write(DEFAULT_MTL_TEXT)
out.write("map_Kd %s.png\n" % name)
def parse_args():
""" parse arguments out of sys.argv """
epilog = "Example: wad2obj.py doom.wad -m 'E1*' -o /tmp"
parser = argparse.ArgumentParser(description=__doc__, epilog=epilog)
parser.add_argument(
'source_wad', type=str, help='Path to the input WAD file.')
parser.add_argument(
'-l','--list', action='store_true', default=False,
help="List the names of the maps in the source wad without exporting anything.")
parser.add_argument(
'-m','--maps', type=str, default='*', metavar='PATTERN',
help="Pattern of maps to export (e.g. 'MAP*' or 'E?M1'). Use * as a wildcard or ? as any single character.")
parser.add_argument(
'-o','--output', type=str, default='.', metavar='PATH',
help="Directory path where output files will be written.")
parser.add_argument(
'-c','--center', action='store_true', default=False,
help="Translate the output vertices so the center of the map is at the origin.")
return parser.parse_args()
def main():
args = parse_args()
    print("Loading %s..." % args.source_wad)
inwad = wad.WAD()
inwad.from_file(args.source_wad)
if args.list:
        print("Found %d maps:" % len(inwad.maps))
        for mapName in inwad.maps.keys():
            print(" %s" % mapName)
sys.exit(0)
# lets make sure all output files are written here
os.chdir(args.output)
# export the textures first, so we know all their sizes
textureNames, textureSizes = writemtl(inwad)
maps = util.find(inwad.maps, args.maps)
    if len(maps) == 0:
        print("No maps matching pattern '%s' were found." % (args.maps,))
    else:
        print("Found %d maps matching pattern '%s'" % (len(maps), args.maps))
        for name in maps:
            objfile = name + ".obj"
            print("Writing %s" % objfile)
            objmap(inwad, name, objfile, textureNames, textureSizes, args.center)
"""
Sample code for debugging...
from omg import wad, txdef, mapedit
w = wad.WAD('doom.wad')
t = txdef.Textures(w.txdefs)
flat = w.flats['FLOOR0_1']
map = w.maps['E1M1']
edit = mapedit.MapEditor(map)
"""
if __name__ == "__main__":
main()
``` |
{
"source": "jminor/pyaaf2",
"score": 2
} |
#### File: pyaaf2/aaf2/ama.py
```python
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
import os
import sys
from .rational import AAFRational
from . import video, audio, mxf
from .auid import AUID
import struct
MediaContainerGUID = {
"Generic": (AUID("b22697a2-3442-44e8-bb8f-7a1cd290ebf1"),
('.3g2', '.3gp', '.aac', '.au', '.avi', '.bmp', '.dv', '.gif',
'.jfif', '.jpeg', '.jpg', '.m4a', '.mid', '.moov', '.mov',
'.movie', '.mp2', '.mp3', '.mp4', '.mpa', '.mpe', '.mpeg',
'.mpg', '.png', '.psd', '.qt', '.tif', '.tiff',)),
"AVCHD": (AUID("f37d624b307d4ef59bebc539046cad54"),
('.mts', '.m2ts',)),
"ImageSequencer": (AUID("4964178d-b3d5-485f-8e98-beb89d92a5f4"),
('.dpx',)),
"CanonRaw": (AUID("0f299461-ee19-459f-8ae6-93e65c76a892"),
('.rmf',)),
"WaveAiff": (AUID("3711d3cc-62d0-49d7-b0ae-c118101d1a16"),
('.wav', '.wave', '.bwf', '.aif', '.aiff', '.aifc', '.cdda',)),
"MXF": (AUID("60eb8921-2a02-4406-891c-d9b6a6ae0645"),
('.mxf',)),
"QuickTime": (AUID("781f84b7-b989-4534-8a07-c595cb9a6fb8"),
('.mov', '.mp4', '.m4v', '.mpg', '.mpe', '.mpeg', '.3gp', '.3g2',
'.qt', '.moov', '.movie', '.avi', '.mp2', '.mp3', '.m4a', '.wav',
'.aiff', '.aif', '.au', '.aac', '.mid', '.mpa', '.gif', '.jpg',
'.jpeg', '.jfif', '.tif', '.tiff', '.png', '.bmp', '.psd', '.dv')),
}
def get_wave_fmt(path):
"""
Returns a bytearray of the WAVE RIFF header and fmt
chunk for a `WAVEDescriptor` `Summary`
"""
with open(path, 'rb') as file:
if file.read(4) != b"RIFF":
return None
data_size = file.read(4) # container size
if file.read(4) != b"WAVE":
return None
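        # each subsequent RIFF chunk is a 4-byte id and a 4-byte
        # little-endian size followed by the payload, padded to an even
        # byte count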
while True:
chunkid = file.read(4)
sizebuf = file.read(4)
if len(sizebuf) < 4 or len(chunkid) < 4:
return None
size = struct.unpack(b'<L', sizebuf)[0]
if chunkid[0:3] != b"fmt":
if size % 2 == 1:
seek = size + 1
else:
seek = size
file.seek(seek, 1)
else:
return bytearray(b"RIFF" + data_size + b"WAVE" + chunkid + sizebuf + file.read(size))
def get_aifc_fmt(path):
"""
Compute the AIFC header information for a `AIFCDescriptor` `Summary`
:param path: file to read chunk from
:return: a `bytearray`
"""
with open(path, 'rb') as file:
if file.read(4) != b"FORM":
return None
data_size = file.read(4)
signature = file.read(4)
if signature not in (b"AIFF", b"AIFC"):
return None
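        # AIFF/AIFC chunks use the same id/size/payload layout as RIFF,
        # but chunk sizes are big-endian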
while True:
chunkid = file.read(4)
sizebuf = file.read(4)
if len(sizebuf) < 4 or len(chunkid) < 4:
return None
size = struct.unpack(">L", sizebuf)[0]
if chunkid != b"COMM":
if size % 2 == 1:
seek = size + 1
else:
seek = size
file.seek(seek, 1)
else:
return bytearray(b"FORM" + data_size + signature + chunkid + sizebuf + file.read(size))
def create_network_locator(f, absolute_path):
n = f.create.NetworkLocator()
if sys.version_info[0] < 3:
import urllib
n['URLString'].value = 'file://' + urllib.pathname2url(absolute_path)
else:
import pathlib
n['URLString'].value = pathlib.Path(absolute_path).as_uri()
return n
class FormatInfo:
"""
    Provides convenient access to commonly used fields of the ffprobe metadata
"""
def __init__(self, metadata):
self.metadata = metadata
@property
def streams(self):
for stream in self.metadata['streams']:
yield StreamInfo(stream)
@property
def first_sound_stream(self):
return next((stream for stream in self.streams if stream.is_sound), None)
@property
def first_picture_stream(self):
return next((stream for stream in self.streams if stream.is_picture), None)
def create_descriptor(self, f, path):
if self.metadata['format']['format_name'] in ('wav',):
return self.create_wav_descriptor(f, path)
if self.metadata['format']['format_name'] in ('aiff',):
return self.create_aifc_descriptor(f,path)
elif self.metadata['format']['format_long_name'] == 'QuickTime / MOV':
return self.create_multistream_descriptor(f, path)
else:
return None
@property
def container_guid(self):
if self.metadata['format']['format_name'] in ('wav',):
return MediaContainerGUID['WaveAiff'][0]
# if self.metadata['format']['format_long_name'] == 'QuickTime / MOV':
# return MediaContainerGUID['QuickTime'][0]
# just using the generic appears to work
return MediaContainerGUID['Generic'][0]
@property
def edit_rate(self):
"""
:return: The edit rate of the first picture stream, or if there are none, the first sound stream.
"""
pix = self.first_picture_stream
if pix is None:
return self.first_sound_stream.edit_rate
else:
return pix.edit_rate
@property
def length(self):
"""
:return: The length of the first picture stream, or if there are none, the first sound stream.
"""
pix = self.first_picture_stream
if pix is None:
return self.first_sound_stream.length
else:
return pix.length
def create_wav_descriptor(self, f, path):
d = f.create.WAVEDescriptor()
stream = self.first_sound_stream
d['SampleRate'].value = stream.edit_rate
d['Summary'].value = get_wave_fmt(path)
d['Length'].value = stream.length
d['ContainerFormat'].value = f.dictionary.lookup_containerdef("AAF")
d['Locator'].append(create_network_locator(f, path))
return d
def create_aifc_descriptor(self, f, path):
d = f.create.AIFCDescriptor()
stream = self.first_sound_stream
d['SampleRate'].value = stream.edit_rate
d['Summary'].value = get_aifc_fmt(path)
d['Length'].value = stream.length
d['ContainerFormat'].value = f.dictionary.lookup_containerdef("AAF")
d['Locator'].append(create_network_locator(f, path))
return d
def coalesce_descriptors(self, f, descriptors, path):
if len(descriptors) > 1:
desc = f.create.MultipleDescriptor()
desc['Length'].value = self.length
desc['SampleRate'].value = self.edit_rate
desc['MediaContainerGUID'].value = self.container_guid
desc['Locator'].append(create_network_locator(f, path))
desc['FileDescriptors'].value = descriptors
return desc
else:
return descriptors[0]
def create_multistream_descriptor(self, f, path):
descriptor_list = []
for stream in self.streams:
if stream.is_picture:
desc = stream.create_video_descriptor(f)
descriptor_list.append(desc)
desc['Locator'].append(create_network_locator(f, path))
elif stream.is_sound:
desc = stream.create_pcm_descriptor(f)
descriptor_list.append(desc)
desc['Locator'].append(create_network_locator(f, path))
return self.coalesce_descriptors(f, descriptor_list, path)
class StreamInfo:
def __init__(self, metadata):
self.metadata = metadata
@property
def codec_type(self):
return self.metadata['codec_type']
@property
def codec_name(self):
return self.metadata['codec_name']
@property
def is_sound(self):
return self.codec_type == 'audio'
@property
def is_picture(self):
return self.codec_type == 'video'
@property
def edit_rate(self):
if self.is_sound:
return AAFRational(self.metadata['sample_rate'])
elif self.is_picture:
return AAFRational(self.metadata['avg_frame_rate'])
@property
def length(self):
if self.is_sound:
return int(self.metadata['duration_ts'])
elif self.is_picture:
return int(self.metadata['nb_frames'])
@property
def physical_track_count(self):
if self.is_sound:
return self.metadata['channels']
def create_pcm_descriptor(self, f):
if not self.is_sound:
return None
d = f.create.PCMDescriptor()
d['SampleRate'].value = self.edit_rate
d['AudioSamplingRate'].value = self.edit_rate
d['Channels'].value = self.physical_track_count
d['AverageBPS'].value = int(self.metadata['bit_rate'])
bit_depth, block_align = audio.audio_format_sizes.get(self.metadata['sample_fmt'], (0, 0))
d['QuantizationBits'].value = bit_depth
d['BlockAlign'].value = block_align
d['Length'].value = self.length
d['Compression'].value = AUID('04020202-0000-0000-060e-2b3404010101')
return d
def pixel_sizes(self):
if not self.is_picture:
return None
pix_fmt = self.metadata['pix_fmt']
h_samp = 2
v_samp = 2
depth = 8
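        # infer chroma subsampling from the pix_fmt name: 4:2:0 halves both
        # axes, 4:2:2 halves only the horizontal axis, 4:4:4 is unsubsampled;
        # a trailing p10/p12/p16 marks the bit depth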
if pix_fmt.count('420'):
h_samp = 2
v_samp = 2
elif pix_fmt.count('422'):
h_samp = 2
v_samp = 1
elif pix_fmt.count('444'):
h_samp = 1
v_samp = 1
for i in [8, 10, 12, 16]:
if pix_fmt.count("p%d" % i):
depth = i
break
return (depth, h_samp, v_samp)
def get_avc_compression(self):
if not self.is_picture:
return None
profile = self.metadata.get('profile', None)
key = 'CompressedPicture'
if profile == "Baseline":
key = 'AVCBaselineUnconstrained'
elif profile == "Constrained Baseline":
key = 'AVCConstrainedBaselineUnconstrained'
elif profile == "Main":
key = 'AVCMainUnconstrained'
elif profile == "Extended":
key = 'AVCExtendedUnconstrained'
elif profile == "High":
key = 'AVCHighUnconstrained'
elif profile == "High 10":
key = 'AVCHigh10Unconstrained'
elif profile == "High 10 Intra":
key = 'AVCHigh10IntraUnconstrained'
elif profile == "High 4:2:2":
key = 'AVCHigh422Unconstrained'
elif profile == "High 4:2:2 Intra":
key = 'AVCHigh422IntraUnconstrained'
elif profile == "High 4:4:4":
# key = 'AVCHigh444IntraUnconstrained'
key = 'CompressedPicture'
elif profile == "High 4:4:4 Predictive":
# key = 'AVCHigh444PredictiveUnconstrained'
key = 'CompressedPicture'
elif profile == "High 4:4:4 Intra":
# key = 'AVCHigh444IntraUnconstrained'
key = 'CompressedPicture'
elif profile == 'CAVLC 4:4:4':
# key = 'AVCCAVLC444IntraUnconstrained'
key = 'CompressedPicture'
return video.compression_ids[key]
def get_compression(self):
if not self.is_picture:
return None
codec_name = self.metadata.get('codec_name', None)
if codec_name == 'mjpeg':
return video.compression_ids['mjpeg']
if codec_name == 'h264':
return self.get_avc_compression()
return video.compression_ids['CompressedPicture']
def create_video_descriptor(self, f):
if not self.is_picture:
return None
d = f.create.CDCIDescriptor()
depth, h_samp, v_samp = self.pixel_sizes()
width = self.metadata['width']
height = self.metadata['height']
aspect_ratio = "%d/%d" % (width, height)
d['ComponentWidth'].value = depth
d['HorizontalSubsampling'].value = h_samp
d['VerticalSubsampling'].value = v_samp
d['FrameLayout'].value = 'FullFrame'
d['VideoLineMap'].value = [0, 0]
# d['VideoLineMap'].value = [42, 0]
d['ImageAspectRatio'].value = aspect_ratio
d['StoredWidth'].value = width
d['StoredHeight'].value = height
d['SampleRate'].value = self.metadata['avg_frame_rate']
compression = self.get_compression()
d['Compression'].value = compression
# d['ResolutionID'].value = 2900
d['Length'].value = int(self.length)
return d
def create_media_link(f, path, metadata):
"""
Create an essence linked to external media and all obligatory mobs and data structures required by
the edit spec.
The returned :class:`aaf.mobs.MasterMob` will have one slot for each video stream and each audio channel
in the file at `path`.
Example: The linked file is a Quicktime movie with picture and a stereo audio track. This function will create a
SourceMob with three slots, one picture slot, and two sound slots, for audio channels one and two respectively.
The function will also create a derivation SourceMob, linked to these slots.
:param f: The :class:`aaf.File` to add this link to
:param path: A path recognizable to `os.path`
:param metadata: Pre-fetched media description (in the form of a dictionary)
from "ffprobe -show_format -show_streams"
    :return: A tuple (master_mob, source_mob, tape_mob) linked to the file at `path`.
"""
def tape_mob_for_format(name, format_info):
tape_mob = f.create.SourceMob()
tape_mob.name = name
picture = format_info.first_picture_stream
if picture is not None:
pix_slot = tape_mob.create_picture_slot(edit_rate=picture.edit_rate)
pix_slot.segment.length = picture.length
tape_source_clip = f.create.SourceClip(media_kind='picture')
tape_source_clip.length = picture.length
pix_slot.segment.components.append(tape_source_clip)
sound = format_info.first_sound_stream
if sound is not None:
for channel in range(sound.physical_track_count):
sound_slot = tape_mob.create_sound_slot(edit_rate=sound.edit_rate)
sound_slot.segment.length = sound.length
tape_source_clip = f.create.SourceClip(media_kind='sound')
tape_source_clip.length = sound.length
sound_slot.segment.components.append(tape_source_clip)
# not setting PhysicalTrackNumber here because we didn't before
f.content.mobs.append(tape_mob)
tape_mob.descriptor = f.create.ImportDescriptor()
return tape_mob
def append_source_to_mob_as_new_slots(from_mob, to_mob):
sound_physical_track = 1
for from_slot in from_mob.slots:
slot_kind = from_slot.media_kind
if slot_kind in ("Picture", "Sound"):
from_clip = from_mob.create_source_clip(slot_id=from_slot.slot_id, media_kind=slot_kind)
to_slot = to_mob.create_empty_sequence_slot(edit_rate=from_slot.edit_rate,
media_kind=from_clip.media_kind)
to_slot.segment.components.append(from_clip)
if slot_kind == 'Sound':
to_slot['PhysicalTrackNumber'].value = sound_physical_track
sound_physical_track += 1
def source_mob_from_tape_mob(name, tape_mob):
source_mob = f.create.SourceMob()
source_mob.name = name
append_source_to_mob_as_new_slots(from_mob=tape_mob, to_mob=source_mob)
f.content.mobs.append(source_mob)
return source_mob
def master_mob_from_source_mob(name, source_mob):
master_mob = f.create.MasterMob()
master_mob.name = name
append_source_to_mob_as_new_slots(from_mob=source_mob, to_mob=master_mob)
f.content.mobs.append(master_mob)
return master_mob
def create_mobs(name, format_info):
tmob = tape_mob_for_format(name + ' <TAPE MOB>', format_info)
smob = source_mob_from_tape_mob(name + ' <SOURCE MOB>', tmob)
mmob = master_mob_from_source_mob(name, smob)
if format_info.first_picture_stream is not None:
# MC Quicktime plugin will error if this is not set
smob.comments['Video'] = format_info.first_picture_stream.codec_name
return mmob, smob, tmob
basename = os.path.basename(path)
name, ext = os.path.splitext(basename)
if ext == '.mxf' or ext == '.MXF':
m = mxf.MXFFile(path)
m.ama = True
m.dump()
return m.link(f)
format_info = FormatInfo(metadata)
source_descriptor = format_info.create_descriptor(f, path)
if source_descriptor is None:
return None
master_mob, source_mob, tape_mob = create_mobs(name, format_info)
source_mob.descriptor = source_descriptor
return master_mob, source_mob, tape_mob
```
#### File: pyaaf2/tests/test_ama.py
```python
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
import os
import unittest
import common
import aaf2
from aaf2 import ama
avc_profiles = [('yuv420p', 'baseline'),
('yuv420p', 'main'),
('yuv420p', 'high'),
('yuv422p', 'high422'),
# ('yuv444p', 'high444')# unsupported in MC?
]
prores_profiles = [
(0, 'proxy'),
(1, 'LT'),
(2, 'standard'),
(3, 'HQ'),
(4, '4444'),
]
class AMATests(unittest.TestCase):
def assert_mastermob_valid_edit_spec(self, master_mob, expected_picture_slots, expected_sound_slots):
"""
        Verifies that a MasterMob has all the required elements for the edit spec.
        :param master_mob: the MasterMob to validate
        :param expected_picture_slots: expected number of picture slots
        :param expected_sound_slots: expected number of sound slots
"""
physical_sound_tracks = []
self.assertIsNotNone(master_mob, "Failed to find MasterMob by saved ID")
self.assertEqual(len(master_mob.slots), expected_picture_slots + expected_sound_slots,
"Failed to find correct number of slots in MasterMob")
sound_slots = list([slot for slot in master_mob.slots if slot.segment.media_kind == 'Sound'])
picture_slots = list([slot for slot in master_mob.slots if slot.segment.media_kind == 'Picture'])
self.assertEqual(len(sound_slots), expected_sound_slots, "MasterMob has incorrect number of sound slots")
        self.assertEqual(len(picture_slots), expected_picture_slots, "MasterMob has incorrect number of picture slots")
file_source_mob = None
for mob_slot in master_mob.slots:
self.assertIsInstance(mob_slot.segment, aaf2.components.Sequence, "MobSlot has no Sequence")
self.assertEqual(len(mob_slot.segment.components), 1, "Mob slot must have exactly one component")
self.assertIsInstance(mob_slot.segment.components[0], aaf2.components.SourceClip,
"Mob slot sequence has incorrect component type")
source_clip = mob_slot.segment.components[0]
self.assertIsNotNone(source_clip.start, "SourceClip does not have a start time")
self.assertIsNotNone(source_clip.length, "SourceClip does not have a length time")
self.assertTrue(source_clip.length > 0, "SourceClip appears to have invalid length")
self.assertIsNotNone(source_clip.mob, "MasterMob SourceClip does not have a linked SourceMob")
if file_source_mob is None:
file_source_mob = source_clip.mob
else:
self.assertEqual(source_clip.mob, file_source_mob,
"MasterMob slots appear to reference different SourceMOBs")
if mob_slot.media_kind == 'Sound':
physical_sound_tracks.append(int(mob_slot['PhysicalTrackNumber'].value))
self.assertEqual(len(source_clip.mob.slots), expected_picture_slots + expected_sound_slots,
"SourceMob has incorrect number of slots")
tape_source_clip = file_source_mob.slot_at(mob_slot.slot_id).segment.components[0]
self.assertIsInstance(tape_source_clip, aaf2.components.SourceClip,
"File SourceMob does not have a tape SourceMob")
if isinstance(tape_source_clip.mob.descriptor, aaf2.essence.TapeDescriptor):
pass
elif isinstance(tape_source_clip.mob.descriptor, aaf2.essence.ImportDescriptor):
pass
else:
self.fail("Tape SourceMob descriptor must be either a TapeDescriptor or ImportDescriptor")
for chan_num in range(expected_sound_slots):
self.assertEqual(physical_sound_tracks.count(chan_num + 1), 1,
"Incorrect PhysicalTrackNumber property on master mob slot")
self.assertEqual(len(physical_sound_tracks), expected_sound_slots,
"Incorrect PhysicalTrackNumber count on master mob slot")
def assert_valid_multiple_descriptor(self, mastermob, expected_audio_channel_count):
for mob_slot in mastermob.slots:
source_clip = mob_slot.segment.components[0]
self.assertIsNotNone(source_clip.mob.comments['Video'],
"SourceMob must have a value for 'Video' comment")
self.assertEqual(len(source_clip.mob.descriptor['FileDescriptors'].value), 2,
"SourceMob's descriptor has incorrect 'FileDescriptor' property value")
self.assertIsInstance(source_clip.mob.descriptor, aaf2.essence.MultipleDescriptor,
"SourceClip Mob has incorrect descriptor")
for descriptor in source_clip.mob.descriptor['FileDescriptors'].value:
self.assertIsNotNone(descriptor['Locator'].value,
"SourceClip descriptor not properly formatted")
locators = descriptor['Locator'].value
self.assertTrue(len(locators) >= 1)
if isinstance(descriptor, aaf2.essence.PCMDescriptor):
self.assertEqual(descriptor['Channels'].value, expected_audio_channel_count,
"SourceClip descriptor not properly formatted")
elif isinstance(descriptor, aaf2.essence.CDCIDescriptor):
self.assertIsInstance(descriptor['ComponentWidth'].value, int)
self.assertIsInstance(descriptor['HorizontalSubsampling'].value, int)
else:
self.fail("Encountered unexpected essence descriptor")
def test_monoaural_wav(self):
new_file = os.path.join(common.sandbox(), 'ama_wav.aaf')
with aaf2.open(new_file, 'w') as f:
wavfile = common.generate_pcm_audio_mono('test_ama.wav', fmt='wav')
meta = common.probe(wavfile)
mobs = ama.create_media_link(f, wavfile, meta)
            self.assertEqual(len(mobs), 3)
with aaf2.open(new_file, 'r') as f:
common.walk_aaf(f.root)
self.assertTrue(len(f.content.mobs) == 3)
self.assertTrue(len(list(f.content.mastermobs())) == 1)
master_mob = next(f.content.mastermobs())
self.assert_mastermob_valid_edit_spec(master_mob=master_mob, expected_sound_slots=1,
expected_picture_slots=0)
self.assertEqual(len(master_mob.slots), 1, "MasterMob should only have one slot")
self.assertEqual(master_mob.slots[0].media_kind, 'Sound', "MasterMob slot has incorrect media_kind")
source_clip = master_mob.slots[0].segment.components[0]
descriptor = source_clip.mob.descriptor
self.assertIsNotNone(descriptor, "File SourceMob has no WAVEDescriptor")
self.assertIsInstance(descriptor, aaf2.essence.WAVEDescriptor, "File SourceMob has no WAVEDescriptor")
self.assertIsNotNone(descriptor['Summary'].value, "WAVEDescriptor missing required 'Summary' property")
def test_monoaural_aiff(self):
new_file = os.path.join(common.sandbox(), 'ama_aiff.aaf')
with aaf2.open(new_file, 'w') as f:
aiff_file = common.generate_pcm_audio_mono('test_ama_aiff', fmt='aiff')
meta = common.probe(aiff_file)
mobs = ama.create_media_link(f, aiff_file, meta)
            self.assertEqual(len(mobs), 3)
with aaf2.open(new_file, 'r') as f:
common.walk_aaf(f.root)
self.assertTrue(len(f.content.mobs) == 3)
self.assertTrue(len(list(f.content.mastermobs())) == 1)
master_mob = next(f.content.mastermobs())
self.assert_mastermob_valid_edit_spec(master_mob=master_mob, expected_sound_slots=1,
expected_picture_slots=0)
self.assertEqual(len(master_mob.slots), 1, "MasterMob should only have one slot")
self.assertEqual(master_mob.slots[0].media_kind, 'Sound', "MasterMob slot has incorrect media_kind")
source_clip = master_mob.slots[0].segment.components[0]
descriptor = source_clip.mob.descriptor
self.assertIsNotNone(descriptor, "File SourceMob has no WAVEDescriptor")
self.assertIsInstance(descriptor, aaf2.essence.AIFCDescriptor, "File SourceMob has no AIFCDescriptor")
self.assertIsNotNone(descriptor['Summary'].value, "AIFCDescriptor missing required 'Summary' property")
def test_avc_mov(self):
new_file = os.path.join(common.sandbox(), 'avc_mov.aaf')
audio_channel_count = 2
created_mastermob_ids = []
with aaf2.open(new_file, 'w') as f:
for (pix_fmt, profile) in avc_profiles:
vcodec = ['-pix_fmt', pix_fmt, '-c:v', 'h264', '-profile:v', profile]
mov = common.generate_mov('ama_avc_%s.mov' % profile, overwrite=False, vcodec=vcodec,
audio_channels=audio_channel_count)
meta = common.probe(mov)
# print(meta['streams'][0]['profile'])
mobs = f.content.create_ama_link(mov, meta)
self.assertEqual(len(mobs), 3)
self.assertIsInstance(mobs[0], aaf2.mobs.MasterMob)
self.assertIsInstance(mobs[1], aaf2.mobs.SourceMob)
self.assertIsInstance(mobs[2], aaf2.mobs.SourceMob)
created_mastermob_ids.append(mobs[0].mob_id)
with aaf2.open(new_file, 'r') as f:
common.walk_aaf(f.root)
self.assertEqual(len(f.content.mobs), len(avc_profiles) * 3,
"Failed to create exactly three MOBs per avc_profile")
self.assertEqual(len(list(f.content.mastermobs())), len(avc_profiles),
"Failed to create exactly one MasterMOB per avc_profile")
for mastermob_id in created_mastermob_ids:
mastermob = next((mob for mob in f.content.mobs if mob.mob_id == mastermob_id), None)
self.assert_mastermob_valid_edit_spec(mastermob, expected_picture_slots=1, expected_sound_slots=2)
self.assert_valid_multiple_descriptor(mastermob, audio_channel_count)
def test_prores(self):
new_file = os.path.join(common.sandbox(), 'prores_mov.aaf')
created_mastermob_ids = []
with aaf2.open(new_file, 'w') as f:
for profile, name in prores_profiles:
vcodec = ['-c:v', 'prores_ks', '-profile:v', str(profile)]
mov = common.generate_mov('ama_prores_%s.mov' % (name,), overwrite=False, vcodec=vcodec,
audio_channels=2)
meta = common.probe(mov)
mobs = ama.create_media_link(f, mov, meta)
                self.assertEqual(len(mobs), 3, "create_media_link must return exactly three mobs")
created_mastermob_ids.append(mobs[0].mob_id)
with aaf2.open(new_file, 'r') as f:
common.walk_aaf(f.root)
self.assertEqual(len(f.content.mobs), len(prores_profiles) * 3,
"Failed to create exactly three MOBs per prores_profile")
self.assertEqual(len(list(f.content.mastermobs())), len(prores_profiles),
"Failed to create exactly one MasterMOB per prores_profile")
for mastermob_id in created_mastermob_ids:
mastermob = next((mob for mob in f.content.mobs if mob.mob_id == mastermob_id), None)
self.assert_mastermob_valid_edit_spec(mastermob, expected_picture_slots=1, expected_sound_slots=2)
self.assert_valid_multiple_descriptor(mastermob, 2)
if __name__ == "__main__":
import logging
# logging.basicConfig(level=logging.DEBUG)
unittest.main()
``` |
{
"source": "jminor/pyxel-demo",
"score": 4
} |
#### File: jminor/pyxel-demo/game.py
```python
import pyxel
import math
SPEED = 0.4
DEBUG = None
def lerp(a, b, t):
"""lerp = linear interpolation.
Returns a value between a and b, based on the value of t
When t=0, a is returned. When t=1, b is returned.
When t is between 0 and 1, a value mixed between a and b is returned.
For example, lerp(10,20,0.5) will return 15.
Note the result is not clamped, so if t is less than 0 or greater than 1
then the value is extrapolated beyond a or b.
"""
return a + (b - a) * t
def walkable(x, y):
"""Is the map tile at x,y walkable?"""
t = pyxel.tilemap(0).get(x,y)
return t == 0
def wobble(amount):
t = 5 * pyxel.frame_count / 30.0
off = pyxel.image(2)
screen = pyxel.image(4, system=True)
off.copy(0,0, 4, 0,0, pyxel.width,pyxel.height)
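    # snapshot the screen into image bank 2, then copy each scanline back
    # shifted horizontally by a sine offset that varies with row and time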
# x = amount * math.sin(t)
for y in range(pyxel.height):
x = amount * math.sin(10 * (y / pyxel.height) + t)
screen.copy(
x,y,
2,
0,y,
pyxel.width,1
)
class Thing:
def __init__(self, name, x, y):
self.name = name
# grid coordinates
self.x = x
self.y = y
# pixel coordinates
self.px = x*8
self.py = y*8
def update(self, speed=SPEED):
# smoothly step pixel coordinates px,py towards grid x,y
# by default use SPEED, but you can override that to go
# quicker or slower.
self.px = lerp(self.px, self.x*8, speed)
self.py = lerp(self.py, self.y*8, speed)
class Sprite(Thing):
def __init__(self, name, x, y, tile):
super().__init__(name, x, y)
self.tile = tile
self.xflip = 1
self.yflip = 1
def draw(self, camera):
# pyxel.rect(self.px - camera.px, self.py - camera.py, 8, 8, 9)
pyxel.blt(
# pixel coords to draw
self.px - camera.px,
self.py - camera.py,
# read from image bank 0, where the sprites are
0,
# read from the right spot based on the tile number
(self.tile % 32) * 8,
(self.tile // 32) * 8,
# width and height
8 * self.xflip,
8 * self.yflip,
# which color is transparent?
0
)
class Pot(Sprite):
def smash(self):
self.tile += 1
class Player(Sprite):
def __init__(self, name, x, y, t):
super().__init__(name, x, y, t)
self.gems = 0
self.facing = "east"
def keys_pressed(self, *keys):
for k in keys:
if pyxel.btnp(k, 8, 8):
return True
def update(self):
# which way is the controller pressed?
# arrow keys, WASD, etc.
cx = 0
cy = 0
if self.keys_pressed(pyxel.KEY_UP, pyxel.KEY_W):
cy -= 1
self.facing = "north"
if self.keys_pressed(pyxel.KEY_DOWN, pyxel.KEY_S):
cy += 1
self.facing = "south"
        if self.keys_pressed(pyxel.KEY_LEFT, pyxel.KEY_A):
            cx -= 1
            self.xflip = -1
            self.facing = "west"
        if self.keys_pressed(pyxel.KEY_RIGHT, pyxel.KEY_D):
            cx += 1
            self.xflip = 1
            self.facing = "east"
if walkable(self.x + cx, self.y + cy):
self.x += cx
self.y += cy
super().update()
class App:
def __init__(self):
pyxel.init(128, 128)
self.reset()
pyxel.run(self.update, self.draw)
def reset(self):
pyxel.load("assets/my_resource.pyxres")
self.fade_in = 30
self.camera = Thing("camera", 0, 0)
self.sprites = []
self.colliders = []
self.tilemap = pyxel.tilemap(0)
self.scan_map()
def colliders_at(self, x, y):
# result = []
# for sprite in self.colliders:
# if sprite.x == x and sprite.y == y:
# result.append(sprite)
# return result
return [s for s in self.colliders if s.x==x and s.y==y]
def scan_map(self):
"""Scan the map for special tiles, spawning sprites, etc."""
for y in range(self.tilemap.height):
for x in range(self.tilemap.width):
t = self.tilemap.get(x,y)
if t == 0:
# empty
pass
elif t == 1:
# solid
pass
elif t == 2:
# pot
sprite = Pot("pot", x, y, t)
self.sprites.append(sprite)
self.colliders.append(sprite)
self.tilemap.set(x,y,0)
elif t == 32:
# player
self.player = Player("player", x, y, t)
self.tilemap.set(x,y,0)
elif t == 33:
# gem
sprite = Sprite("gem", x, y, t)
self.sprites.append(sprite)
self.colliders.append(sprite)
self.tilemap.set(x,y,0)
else:
raise Exception("unexpected map tile? "+str(t))
def smash_pot(self, pot):
pot.smash()
# leave it in sprites, but remove it from colliders
self.colliders.remove(pot)
def pickup_gem(self, gem):
self.player.gems += 1
# remove it from both sprites, and colliders
self.sprites.remove(gem)
self.colliders.remove(gem)
def update(self):
if pyxel.btnp(pyxel.KEY_R):
self.reset()
self.player.update()
for sprite in self.sprites:
sprite.update()
for thing in self.colliders_at(self.player.x, self.player.y):
if thing.name == "pot":
self.smash_pot(thing)
if thing.name == "gem":
self.pickup_gem(thing)
# camera follows the player
# self.camera.y = self.player.y - 8
if self.player.x <= self.camera.x or self.player.x >= self.camera.x+15:
self.camera.x = self.player.x - 8
if self.player.y <= self.camera.y or self.player.y >= self.camera.y+15:
self.camera.y = self.player.y - 8
# camera scrolls at a different speed than everything else
self.camera.update(0.5)
global DEBUG
DEBUG = "\nPL {},{}\n{},{}\nCAM {},{}\n{},{}".format(
self.player.x, self.player.y,
self.player.px, self.player.py,
self.camera.x, self.camera.y,
self.camera.px, self.camera.py
)
def draw(self):
pyxel.cls(0)
pyxel.bltm(-self.camera.px, -self.camera.py, 0, 0, 0, self.tilemap.width, self.tilemap.height)
for sprite in self.sprites:
sprite.draw(self.camera)
self.player.draw(self.camera)
if self.fade_in > 0:
wobble(self.fade_in)
self.fade_in = self.fade_in - 1
pyxel.text(1, 1, "GEMS: {}".format(self.player.gems), 7)
if DEBUG is not None:
pyxel.text(1, 9, "DEBUG: {}".format(DEBUG), 8)
App()
``` |
{
"source": "jminsk-cc/xarray",
"score": 3
} |
#### File: xarray/core/concat.py
```python
import warnings
from collections import OrderedDict
import pandas as pd
from . import dtypes, utils
from .alignment import align
from .variable import IndexVariable, Variable, as_variable
from .variable import concat as concat_vars
def concat(
objs,
dim=None,
data_vars="all",
coords="different",
compat="equals",
positions=None,
indexers=None,
mode=None,
concat_over=None,
fill_value=dtypes.NA,
join="outer",
):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
fill_value : scalar, optional
Value to use for newly missing values
    join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional
String indicating how to combine differing indexes
(excluding dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
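    # Example (a minimal sketch, assuming ds1 and ds2 are Datasets with
    # compatible variables):
    #     combined = concat([ds1, ds2], dim="time")
    # stacks them along a new or existing "time" dimension.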
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError("must supply at least one object to concatenate")
if dim is None:
warnings.warn(
"the `dim` argument to `concat` will be required "
"in a future version of xarray; for now, setting it to "
"the old default of 'concat_dim'",
FutureWarning,
stacklevel=2,
)
        dim = "concat_dim"
if indexers is not None: # pragma: no cover
warnings.warn(
"indexers has been renamed to positions; the alias "
"will be removed in a future version of xarray",
FutureWarning,
stacklevel=2,
)
positions = indexers
if mode is not None:
raise ValueError(
"`mode` is no longer a valid argument to "
"xarray.concat; it has been split into the "
"`data_vars` and `coords` arguments"
)
if concat_over is not None:
raise ValueError(
"`concat_over` is no longer a valid argument to "
"xarray.concat; it has been split into the "
"`data_vars` and `coords` arguments"
)
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError(
"can only concatenate xarray Dataset and DataArray "
"objects, got %s" % type(first_obj)
)
return f(objs, dim, data_vars, coords, compat, positions, fill_value, join)
def _calc_concat_dim_coord(dim):
"""
Infer the dimension name and 1d coordinate variable (if appropriate)
for concatenating along the new dimension.
"""
from .dataarray import DataArray
if isinstance(dim, str):
coord = None
elif not isinstance(dim, (DataArray, Variable)):
dim_name = getattr(dim, "name", None)
if dim_name is None:
dim_name = "concat_dim"
coord = IndexVariable(dim_name, dim)
dim = dim_name
elif not isinstance(dim, DataArray):
coord = as_variable(dim).to_index_variable()
dim, = coord.dims
else:
coord = dim
dim, = coord.dims
return dim, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
"""
Determine which dataset variables need to be concatenated in the result,
and which can simply be taken from the first dataset.
"""
# Return values
concat_over = set()
equals = {}
if dim in datasets[0]:
concat_over.add(dim)
for ds in datasets:
concat_over.update(k for k, v in ds.variables.items() if dim in v.dims)
def process_subset_opt(opt, subset):
if isinstance(opt, str):
if opt == "different":
# all nonindexes that are not the same in each dataset
for k in getattr(datasets[0], subset):
if k not in concat_over:
# Compare the variable of all datasets vs. the one
# of the first dataset. Perform the minimum amount of
# loads in order to avoid multiple loads from disk
# while keeping the RAM footprint low.
v_lhs = datasets[0].variables[k].load()
# We'll need to know later on if variables are equal.
computed = []
for ds_rhs in datasets[1:]:
v_rhs = ds_rhs.variables[k].compute()
computed.append(v_rhs)
if not v_lhs.equals(v_rhs):
concat_over.add(k)
equals[k] = False
# computed variables are not to be re-computed
# again in the future
for ds, v in zip(datasets[1:], computed):
ds.variables[k].data = v.data
break
else:
equals[k] = True
elif opt == "all":
concat_over.update(
set(getattr(datasets[0], subset)) - set(datasets[0].dims)
)
elif opt == "minimal":
pass
else:
raise ValueError("unexpected value for %s: %s" % (subset, opt))
else:
invalid_vars = [k for k in opt if k not in getattr(datasets[0], subset)]
if invalid_vars:
if subset == "coords":
raise ValueError(
"some variables in coords are not coordinates on "
"the first dataset: %s" % (invalid_vars,)
)
else:
raise ValueError(
"some variables in data_vars are not data variables "
"on the first dataset: %s" % (invalid_vars,)
)
concat_over.update(opt)
process_subset_opt(data_vars, "data_vars")
process_subset_opt(coords, "coords")
return concat_over, equals
def _dataset_concat(
datasets,
dim,
data_vars,
coords,
compat,
positions,
fill_value=dtypes.NA,
join="outer",
):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset
if compat not in ["equals", "identical"]:
raise ValueError(
"compat=%r invalid: must be 'equals' " "or 'identical'" % compat
)
dim, coord = _calc_concat_dim_coord(dim)
# Make sure we're working on a copy (we'll be loading variables)
datasets = [ds.copy() for ds in datasets]
datasets = align(
*datasets, join=join, copy=False, exclude=[dim], fill_value=fill_value
)
concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
assert isinstance(v, Variable)
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
result_encoding = datasets[0].encoding
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if compat == "identical" and not utils.dict_equiv(ds.attrs, result_attrs):
raise ValueError("dataset global attributes not equal")
for k, v in ds.variables.items():
if k not in result_vars and k not in concat_over:
raise ValueError("encountered unexpected variable %r" % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError(
"%r is a coordinate in some datasets but not " "others" % k
)
elif k in result_vars and k != dim:
# Don't use Variable.identical as it internally invokes
# Variable.equals, and we may already know the answer
if compat == "identical" and not utils.dict_equiv(
v.attrs, result_vars[k].attrs
):
raise ValueError("variable %s not identical across datasets" % k)
# Proceed with equals()
try:
# May be populated when using the "different" method
is_equal = equals[k]
except KeyError:
result_vars[k].load()
is_equal = v.equals(result_vars[k])
if not is_equal:
raise ValueError("variable %s not equal across datasets" % k)
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(
non_concat_dims.get(d, dim_len) for d in common_dims
)
var = var.set_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset (in order)
for k in datasets[0].variables:
if k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
result.encoding = result_encoding
if coord is not None:
# add concat dimension last to ensure that its in the final Dataset
result[coord.name] = coord
return result
def _dataarray_concat(
arrays,
dim,
data_vars,
coords,
compat,
positions,
fill_value=dtypes.NA,
join="outer",
):
arrays = list(arrays)
if data_vars != "all":
raise ValueError(
"data_vars is not a valid argument when " "concatenating DataArray objects"
)
datasets = []
for n, arr in enumerate(arrays):
if n == 0:
name = arr.name
elif name != arr.name:
if compat == "identical":
raise ValueError("array names not identical")
else:
arr = arr.rename(name)
datasets.append(arr._to_temp_dataset())
ds = _dataset_concat(
datasets,
dim,
data_vars,
coords,
compat,
positions,
fill_value=fill_value,
join=join,
)
return arrays[0]._from_temp_dataset(ds, name)
``` |
{
"source": "j-min/syntaxnet-kr",
"score": 2
} |
#### File: j-min/syntaxnet-kr/SJtoUD_Type2.py
```python
import re
import codecs
import os
from pythonds.basic.stack import Stack
"""
[1] Main Function
"""
def main():
directory = os.getcwd() + '/InputDataType2'
filename = 'BGHO0437.txt'
filename = os.path.join(directory, filename)
f = open(filename, 'r', encoding='utf-16')
is_inside = False
line_counter = 0
    OUT_FILENAME = os.path.join("OutputDataType2", "kr-ud-dev.conllu")
    with codecs.open(OUT_FILENAME, "w", "utf-8") as file:
        """
        Each time a line is read, its result is stored in sniparray and
        posarray. Morphemes are not printed as soon as they are read;
        instead the whole line is read first and the result is printed
        afterwards, for the following reason:
        '옮겨졌다.' is analyzed as
        옮기/VV + 어/EC + 지/VX + 었/EP + 다/EF + ./SF
        so the only morphemes literally preserved from the original word
        (옮겨졌다) are '다' and '.'. Therefore, instead of splitting it as
        옮기/VV + 어/EC + 지/VX + 었/EP + 다/EF + ./SF
        we emit
        옮겨졌/VV + EC + VX + EP + 다/EF + ./SF
        To detect whether the original word is a plain concatenation of
        its morphemes' base forms or has been transformed, buffers are
        used (snipbuffer, posbuffer).
        Each morpheme read is accordingly classified into one of four
        cases; word_counter tracks the current reading position and
        end_of_sequence marks the end of a sentence.
        """
sniparray = []
sniparrayOrigin = []
posarray = []
        # init data structures for parenthesis handling
stack = Stack()
stackLevel = [1] # for Call By Reference
totalCount = [1]
currentLevel = [1]
levelCountArray = []
wordDic = dict()
numDic = dict()
split_sentence = ""
num_word_in_sentence = 0
        which_word_in_sentence = 0
word = ""
special_characters = "'", "-", '"', "Q"
special_character = False
for line in f:
#print (line)
#break
chomped_line = line
# print(chomped_line)
            if chomped_line[0] == ";":
                numDic = wordDicToNumDic(wordDic, levelCountArray)
                """
                # For debugging: uncomment this block to see detailed info.
                print("[Last Result for debug]")
                print(sniparray)
                print(sniparrayOrigin)
                print(numDic)
                print(wordDic)
                """
for i in range (0, len(sniparrayOrigin)):
print (i+1
, "\t", getFormStr("".join(sniparray[i]))
, "\t", getLemmaStr(sniparrayOrigin[i][0])
, "\t", getUpostagStr("+".join(posarray[i]))
, "\t", getXpostagStr("+".join(posarray[i]))
, "\t", getFeatsStr("")
, "\t" , getHeadStr(numDic[wordDic[sniparrayOrigin[i][0]] - 2])
, "\t", getDeprelStr("")
, "\t", getDepsStr("")
, "\t", getMiscStr(wordDic[sniparrayOrigin[i][0]] - 1)
)
print()
split_sentence = chomped_line.split(' ')
# print(split_sentence)
sniparray = []
sniparrayOrigin = []
posarray = []
                which_word_in_sentence = 0
                # reset data structures for parenthesis handling
stack = Stack()
stackLevel[0] = 0
totalCount[0] = 0
currentLevel[0] = 0
levelCountArray = []
wordDic = dict()
numDic = dict()
#any(x in a for x in b)
if any(x in special_characters for x in chomped_line):
special_character = True
print(chomped_line.replace("; ", ""))
print("This sentence contains special_character")
else:
special_character = False
elif special_character == True:
continue
elif ("(" in chomped_line) and ("\t" in chomped_line):
m1 = re.match('(.*)(\([A-Z_]+ *\t*)+([^\(\)]+)([\)]+)', chomped_line)
if m1:
#print ("features_of_previous_parsed_words", m1.group(1))
#print ("feature_of_current_parsed_word", m1.group(2))
#print ("parsed", m1.group(3))
parsed = m1.group(3)
previousStr = m1.group(1) + m1.group(2)
lastStr = m1.group(4)
#print ("last_parenthesis", m1.group(4))
snip_pairs = re.split(' \+ ', parsed) # +sign needs to be escaped in regex #던지/VV + 어/EC
snip_pairs_2d = []
parenthesesChecker(previousStr, stack, stackLevel, totalCount, levelCountArray, wordDic, currentLevel)
for snip_pair in snip_pairs:
# line_counter += 1
# print ("snip_pair = ", snip_pair) #던지/VV
m2 = re.match('^([^\/]+)\/([^\/]+)$', snip_pair)
if m2:
snip = m2.group(1)
pos = m2.group(2)
#print ("line", line_counter)
#print ("snip", snip)
#print ("pos", pos)
#print (line_counter,"\t",snip,"\t",pos)
parenthesesChecker(snip_pair, stack, stackLevel, totalCount, levelCountArray, wordDic, currentLevel, m2)
snip_pairs_2d.append([snip, pos])
parenthesesChecker(lastStr, stack, stackLevel, totalCount, levelCountArray, wordDic, currentLevel)
which_word_in_sentence +=1
# print(which_word_in_sentence)
try:
word = split_sentence[which_word_in_sentence]
except IndexError:
print("Indexerror, pass")
#print (snip_pairs_2d)
#print (word)
buffer_start = 0
                buffer_end = len(snip_pairs_2d)-1
snipbuffer = []
posbuffer = []
word = list(word)
#print(word)
word_counter = 0
end_of_sequence = False
buffer = False
for snip_pair in snip_pairs_2d:
if snip_pairs_2d[-1] == snip_pair:
end_of_sequence = True
# 4 cases
# 1) if snippet is inside the word & no buffer
# 2) if snippet is inside the word & there is buffer
# 3) if snippet is NOT inside the word & no buffer
# 4) if snippet is NOT inside the word & there is buffer
# 1) if snippet is inside the word & no buffer
# => Print current word
if (snip_pair[0] in word[word_counter:]) and (buffer == False):
# print(1)
sniparray.append([snip_pair[0]])
sniparrayOrigin.append([snip_pair[0]])
posarray.append([snip_pair[1]])
buffer_start += len(snip_pair[0])
buffer = False
word_counter +=1
# 2) if snippet is inside the word & there is buffer
# => Print Buffer and Print current word
elif (snip_pair[0] in word[word_counter:]) and (buffer == True):
# print(2)
#print("Where is corresponding word:" word.index(snip_pair[0]))
buffer_end = word.index(snip_pair[0])
snipbuffer = word[buffer_start:buffer_end]
sniparray.append(snipbuffer)
sniparrayOrigin.append([snip_pair[0]])
posarray.append(posbuffer)
buffer_start +=len(snip_pair[0])
sniparray.append([snip_pair[0]])
posarray.append([snip_pair[1]])
buffer = False
word_counter +=1
# 3) if snippet is NOT inside the word & no buffer
# if End of Sequence => Print current word
# if not end of sequence => Do Not Print Buffer, Buffer Start
elif not (snip_pair[0] in word[word_counter:]) and (buffer == False):
if end_of_sequence == True:
# print("3-1")
# Print Current word(=remaining part in the 'word')
snipbuffer = word[buffer_start:]
sniparray.append(snipbuffer)
sniparrayOrigin.append([snip_pair[0]])
posarray.append([snip_pair[1]])
word_counter +=1
else:
# print("3-2")
# Buffer Start!
# snip buffer will be formed right before when buffer is eliminated
# just don't change buffer_start
posbuffer=[]
posbuffer.append(snip_pair[1])
#sniparrayOrigin.append(snip_pair[0])
sniparrayOrigin.append([snip_pair[0]])
buffer = True
word_counter +=1
# 4) if snippet is NOT inside the word & there is buffer
# if End of Sequence => Print Buffer and print current word
# if not end of sequence => Add buffer
else:
if end_of_sequence == True:
# print("4-1")
# Print Buffer and print current word
# buffer_end = len(word)-1
snipbuffer = word[buffer_start:]
sniparray.append(snipbuffer)
#sniparrayOrigin.append(snip_pair[0])
posbuffer.append(snip_pair[1])
posarray.append(posbuffer)
word_counter +=1
else:
# print("4-2")
# Add buffer
posbuffer.append(snip_pair[1])
word_counter +=1
if end_of_sequence == True:
continue
"""
[2] 괄호를 Depth & Count 로 변환한 정보를 넘겨 받아, Depth 별 카운트를 누적하여 수치화 한다.
"""
def wordDicToNumDic(wordDic, levelCountArray):
    resultDic = dict()
    for levelIndex in range(0, len(levelCountArray)):
        if levelIndex == 0:
            resultDic[levelIndex] = levelCountArray[levelIndex]
        else:
            resultDic[levelIndex] = resultDic[levelIndex - 1] + levelCountArray[levelIndex]
    return resultDic
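# Example (following the logic above): levelCountArray = [2, 3, 1] yields
# resultDic = {0: 2, 1: 5, 2: 6}, i.e. a running total of node counts per tree depth.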
"""
[3] 괄호를 Stack 에 넣어 Depth 정보와 Depth 별 Count 로 리턴. ( call by reference 로 결과 리턴 )
"""
def parenthesesChecker(lineString, stack, stackLevel , totalCount, levelCountArray, wordDic , currentLevel, m2 = None) :
localIndex = 0
while localIndex < len(lineString):
symbol = lineString[localIndex]
localIndex += 1
if symbol != "(" and symbol != ")" and symbol != "/":
continue
else:
if symbol == "/" and m2 != None :
wordDic[m2.group(1)] = currentLevel[0] # 해당 단어의 current Level을 기억. 갯수는 나중에 알수 있음.
# 아직은 갯수를 모르므로, 갯수를 leveCountDic 에서 1씩 누적.(아래)
elif symbol == "/":
print("[ERR]" + lineString)
continue
elif symbol == "(":
stack.push(symbol)
if ( currentLevel[0] < len(levelCountArray) ):
levelCountArray[currentLevel[0]] += 1
else:
levelCountArray.append(1)
totalCount[0] += 1
stackLevel[0] += 1
currentLevel[0] += 1
else:
            try:  # some training data has mismatched parenthesis counts in the tree syntax
stack.pop()
currentLevel[0] -= 1
except IndexError:
print("parentheses error, pass")
"""
[4] CoNLL-U Format Function
1.ID: Word index, integer starting at 1 for each new sentence; may be a range for tokens with multiple words.
2.FORM: Word form or punctuation symbol.
3.LEMMA: Lemma or stem of word form.
4.UPOSTAG: Universal part-of-speech tag drawn from our revised version of the Google universal POS tags.
5.XPOSTAG: Language-specific part-of-speech tag; underscore if not available.
6.FEATS: List of morphological features from the universal feature inventory or from a defined language-specific extension; underscore if not available.
7.HEAD: Head of the current token, which is either a value of ID or zero (0).
8.DEPREL: Universal Stanford dependency relation to the HEAD (root iff HEAD = 0) or a defined language-specific subtype of one.
9.DEPS: List of secondary dependencies (head-deprel pairs).
10.MISC: Any other annotation. 우리 소스에서는 이곳에 Tree 의 Depth 를 넣어 놓았음.
"""
# 2.FORM - see the English reference; passes the original text before morphological analysis.
def getFormStr(snip):
return snip
# 3.LEMMA - see the English reference; passes the processed base form from morphological analysis.
def getLemmaStr(snip):
return snip
# 4.UPOSTAG - more mapping rules should be added to this dictionary.
def getUpostagStr(pos):
tagDic = dict()
tagDic['NNG'] = 'NOUN'
tagDic['VV'] = 'VERB'
tagDic['MM'] = 'DET'
tagDic['SF'] = 'PUNCT'
if pos in tagDic.keys():
return tagDic[pos]
else :
return pos
# 5.XPOSTAG
def getXpostagStr(pos):
return pos
# 6.FEATS
def getFeatsStr(pos):
return "_"
# 7.HEAD: shows the cumulative value of the current tree depth, taking the count at the same depth into account.
def getHeadStr(pos):
return pos
# 8.DEPREL
def getDeprelStr(pos):
return "_"
# 9.DEPS
def getDepsStr(pos):
return "_"
# 10.MISC: passes the current tree depth.
def getMiscStr(pos):
return pos
if __name__ == "__main__":
main()
``` |
{
"source": "jmintel7/mangadownload",
"score": 3
} |
#### File: jmintel7/mangadownload/fast_download.py
```python
import requests
from requests_html import HTML
import concurrent.futures
import time
from decorators import my_time, timer
import itertools
import logging
step = 20
fromc = 421
toc = 720
counter = itertools.count()
t1 = time.time()
#Directory to save the downloaded files
save_dir = r'D:\One_Piece\\'  # trailing backslash is doubled because a raw string cannot end in a single '\'
homepage = requests.get('https://www.mangapanda.com/one-piece')
titles = HTML(html = homepage.text)
titles = titles.find('td')
titles = titles[22:-4:2]
def getchapter(chapter):
"""To change chapter number into desired format for saving"""
chapter = str(chapter)
if int(chapter) < 10:
chapter = '00' + chapter
elif int(chapter) < 100:
chapter = '0' + chapter
return chapter
def getpage(page):
"""To change pages number into desired format for saving"""
page = str(page)
if int(page) < 10:
page = '0' + page
return page
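# Note: both helpers above are equivalent to zero-padding with str.zfill,
# e.g. getchapter(7) == str(7).zfill(3) and getpage(7) == str(7).zfill(2).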
def download_img(source, name):
# print(f'Source: {source}')
# print(f'Name: {name}')
img = requests.get(source)
with open(name,'wb') as file:
file.write(img.content)
pages = next(counter)
@my_time
def download_chapters(chapter):
site = 'https://www.mangapanda.com'
link = '/one-piece/'+str(chapter)
mangalink = requests.get(site+link)
html = HTML(html = mangalink.text)
article = html.find('div#selectpage')
pages = int(article[0].text.split()[-1])
for page in range(1,pages+1):
title = titles[chapter-1].text.split(': ')[-1]
if title.endswith(':'):
title = title[:-5]
# print('Parsing Chapter '+str(chapter)+'. '+title+' Page '+str(page))
if page != 1:
mangalink = requests.get(site+link)
html = HTML(html = mangalink.text)
image = html.find('div#imgholder',first = True)
img_src = image.find('img',first = True)
img_src = img_src.attrs['src']
# img = requests.get(img_src)
        img_name = save_dir+getchapter(chapter)+' '+title+' '+getpage(page)+'.jpg'
# download_img(img_src, img_name)
with concurrent.futures.ThreadPoolExecutor() as runner:
runner.submit(download_img, img_src, img_name)
nextpage = image.find('a', first = True)
link = nextpage.attrs['href']
# print(f'---------Chapter {chapter} completed downloading----------')
def download(fromc = 1, toc = 1, step = 10):
start = end = fromc
if toc < fromc:
toc = fromc
while end <= toc:
end += step
if end > toc:
end = toc
chaps = [i for i in range(start, end + 1)]
print(f'Downloading Chapters {start} to {end}')
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(download_chapters, chaps)
# print('Download complete')
start += step
if end == toc:
break
download(fromc, toc, step = step)
print ('--------------------------------Done--------------------------------------------------------------------')
t2 = time.time()
pages = next(counter)  # the counter advanced once per image saved in download_img()
line = f'Downloaded from Chapter {fromc} to Chapter {toc} at {step} Chapters at a time'
timer.info(line)
line = f'A total of {pages} pages downloaded in {t2-t1} seconds'
timer.info(line)
``` |
{
"source": "jmintel7/social_network",
"score": 2
} |
#### File: wakesnet/feed/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
def home(request):
context = {
'title' : 'Wakes Home'
}
return render(request, 'feed/base.html', context)
# Create your views here.
``` |
{
"source": "jmintser/chemfiles",
"score": 2
} |
#### File: doc/ext/chfl_selection.py
```python
from docutils import nodes
from docutils.parsers.rst import Directive
class ChemfilesSelection(Directive):
has_content = True
required_arguments = 1
final_argument_whitespace = True
def run(self):
self.assert_has_content()
node = nodes.admonition('\n'.join(self.content))
node.set_class("chemfiles-selection")
title_text = self.arguments[0]
textnodes, _ = self.state.inline_text(title_text, self.lineno)
target = nodes.target('', '', ids=[nodes.make_id(title_text)])
target.append(nodes.reference(
'', '',
refid=nodes.make_id(title_text),
*textnodes,
))
title = nodes.title(title_text, '', target)
title['add_permalink'] = True
node.append(title)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
def setup(app):
app.add_directive('chemfiles-selection', ChemfilesSelection)
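# Illustrative reStructuredText usage of the directive registered above
# (the selection string is hypothetical):
#
#   .. chemfiles-selection:: name H or name O
#
#      Matches every hydrogen or oxygen atom in the frame.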
```
#### File: scripts/generate/periodic_table.py
```python
import sys
from xml.etree import ElementTree as ET
class Atom:
def __init__(self, symbol, name, number, mass, cov, VdW):
self.symbol = symbol
self.name = name
self.number = number
self.mass = mass
self.cov = cov
self.VdW = VdW
def __str__(self):
return '{{"{}", {{{}, std::string("{}"), {}, nullopt, {}, {}}}}}'.format(
self.symbol, self.number, self.name, self.mass, self.cov, self.VdW
)
def read_elements(path):
root = ET.parse(path).getroot()
atoms = []
for atom in root[1:]:
for prop in atom:
if prop.get("dictRef") == "bo:name":
name = prop.get("value")
if prop.get("dictRef") == "bo:symbol":
symbol = prop.get("value")
if prop.get("dictRef") == "bo:atomicNumber":
number = int(prop.text)
if prop.get("dictRef") == "bo:mass":
assert prop.get("units") == "units:atmass"
mass = float(prop.text)
if prop.get("dictRef") == "bo:radiusCovalent":
assert prop.get("units") == "units:ang"
cov = float(prop.text)
if prop.get("dictRef") == "bo:radiusVDW":
assert prop.get("units") == "units:ang"
VdW = float(prop.text)
atoms.append(Atom(symbol, name, number, mass, cov, VdW))
return atoms
HEADER = """// Chemfiles, a modern library for chemistry file reading and writing
// Copyright (C) Guillaume Fraux and contributors -- BSD license
// !!!! AUTO-GENERATED FILE !!!! Do not edit. See elements.py for the code.
// The data comes from Blue Obelisk's data repository at the svn repository:
// http://svn.code.sf.net/p/bodr/code/trunk/bodr
#include "chemfiles/periodic_table.hpp"
using namespace chemfiles;
"""
ARRAY = """
const atomic_data_map chemfiles::PERIODIC_TABLE = {
"""
def write_elements(path, elements):
with open(path, "w") as fd:
fd.write(HEADER)
fd.write(ARRAY)
        for atom in elements:  # use the function argument, not the global 'atoms'
fd.write(" " + str(atom) + ",\n")
fd.write("};\n")
def usage():
print(sys.argv[0] + " path/to/elements.xml periodic_table.cpp")
if __name__ == "__main__":
if len(sys.argv) < 3:
usage()
        sys.exit(1)  # exit non-zero on bad usage
atoms = read_elements(sys.argv[1])
write_elements(sys.argv[2], atoms)
``` |
{
"source": "jminuscula/lastfm-collage",
"score": 3
} |
#### File: jminuscula/lastfm-collage/download-covers.py
```python
import sys
import argparse
import os
import os.path
import json
import re
import asyncio
import aiohttp
import urllib.parse
import difflib
BASE_URL = "https://sticky-summer-lb.inkstone-clients.net/api/v1/search?term={q}&country=us&media=music&entity=album&genreId=&limit=10&lang=en_us"
MAX_CONCURRENT_REQUESTS = 25
MAX_CONCURRENT_DOWNLOADS = 25
def parse_args(args):
parser = argparse.ArgumentParser(
description='Download cover images for a list of music albums'
)
parser.add_argument(
'albums_file', metavar='FILE',
help='file containing artist-album search strings'
)
parser.add_argument(
'-o', '--out',
default='covers', dest='outdir',
help='covers output directory (default: ./covers)'
)
return parser.parse_args(args)
def get(url, semaphore):
with (yield from semaphore):
response = yield from aiohttp.request('GET', url)
return (yield from response.read())
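# Note: this module uses the pre-Python-3.5, generator-based coroutine style
# ('yield from' rather than async/await); get() first acquires the shared
# semaphore, capping the number of in-flight HTTP requests.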
def find_and_download_cover(query, outdir, req_semaphore, download_semaphore):
url = BASE_URL.format(q=urllib.parse.quote(query))
data_response = yield from get(url, req_semaphore)
data = json.loads(data_response.decode())
if not data.get('results', []):
return
album_query_scores = []
for idx, album in enumerate(data['results']):
try:
album_query = "{} {}".format(album['artistName'], album['name'])
except (KeyError, TypeError):
continue
query_score = difflib.SequenceMatcher(None, query, album_query).ratio()
album_query_scores.append((query_score, album))
if query_score == 1.0:
break
score, album_info = max(album_query_scores, key=lambda s_a: s_a[0], default=(0, None))
if score < 0.5:
print('[NOT FOUND] {}'.format(query))
return None
width, height = album_info['artwork']['width'], album_info['artwork']['height']
cover_url = album_info['artwork']['url'].format(w=width, h=height, f='png')
cover = yield from get(cover_url, download_semaphore)
filename = "{} - {}.png".format(album_info['artistName'], album_info['name'])
filename = re.sub(r'[\/*?;:]', '_', filename)
outfile = os.path.join(outdir, filename)
with open(outfile, 'wb') as cover_file:
cover_file.write(cover)
return True
def download_covers(albums_file, outdir):
if not os.path.exists(albums_file):
print('albums file not found')
sys.exit(1)
if not os.path.exists(outdir):
os.mkdir(outdir)
elif not os.path.isdir(outdir):
print('{} is not a directory'.format(outdir))
sys.exit(1)
tasks = []
req_semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)
download_semaphore = asyncio.Semaphore(MAX_CONCURRENT_DOWNLOADS)
with open(albums_file) as queries:
for query in queries.readlines():
task = find_and_download_cover(query.strip(), outdir, req_semaphore, download_semaphore)
tasks.append(asyncio.Task(task))
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
if __name__ == '__main__':
args = parse_args(sys.argv[1:])
download_covers(args.albums_file, args.outdir)
``` |
{
"source": "jmio/Q24",
"score": 3
} |
#### File: jmio/Q24/q24uitest.py
```python
import sys
import telnetlib
import time
import os
#import subprocess
from ftplib import FTP
import ui
HOST = "192.168.1.172"
user = "target"
password = "password"
#EXECCOMMAND = "ls"
@ui.in_background
def connect(execcmd):
tn = telnetlib.Telnet(HOST)
v = tn.read_until("login: ",1)
tn.write(user + "\r\n")
time.sleep(0.3)
if password:
v = tn.read_until("Password: ",1)
tn.write(password + "\r\n")
time.sleep(0.3)
v = tn.read_until(">",1)
tn.write(execcmd.encode('utf-8'))
tn.write('\r\n')
r = tn.read_until("->",20)
tn.close()
a = [i.strip("\n") for i in r.split("\r")[1:]]
for i in a:
printtext(i)
cmd.text=''
cmd.begin_editing()
def printtext(s):
textview.begin_editing()
l = len(textview.text)
if not(l==0):
textview.selected_range=(l-1,l-1)
textview.replace_range((l,l),s+'\n')
l = len(textview.text)
textview.selected_range=(l-1,l-1)
def button_pushed(sender):
#for i in xrange(30):
#printtext(cmd.text)
connect(cmd.text)
#cmd.text=''
#cmd.begin_editing()
v = ui.load_view('q24uitest')
textview = v['textview1']
cmd = v['commandtext']
v.present('popover')
``` |
{
"source": "JM-IP/DGRL",
"score": 2
} |
#### File: JM-IP/DGRL/main_V2.py
```python
import shutil
import torch
from tensorboardX import SummaryWriter
import sys
import os
import CDbinlosses
import argparse
import data
import util
from data import get_dataset
from preprocess import get_transform
import torch.nn as nn
import torch.optim as optim
sys.path.insert(0, './models')
import nin, xnor_resnet, alexnet, binary_connect_network, binary_connect_network_multilayer_V2
from torch.autograd import Variable
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true',
help='set if only CPU is available')
parser.add_argument('--data', action='store', default='./data/',
help='dataset path')
parser.add_argument('--dataset', action='store', default='cifar10',
help='dataset path')
parser.add_argument('--arch', action='store', default='resnet',
help='the architecture for the network: nin')
parser.add_argument('--gpus', default='2',
help='gpus used for training - e.g 0,1,3')
parser.add_argument('--lr', action='store', default='0.0010',
help='the intial learning rate')
parser.add_argument('--pretrained', action='store', default='./models/vgg_like_real.pth.tar',
help='the path to the pretrained model')
parser.add_argument('--batch_size', action='store', default='32', type=int,
help='batch_size')
parser.add_argument('--workers', action='store', default='8', type=int,
help='workers')
parser.add_argument('--alpha_gap', default='0.5', type=float, help='alpha_gap')
parser.add_argument('--alpha_loss_weight', default='0.01', type=float, help='alpha_loss_weight')
parser.add_argument('--shift_gap', default='0.5', type=float, help='shift_gap')
parser.add_argument('--shift_loss_weight', default='0.5', type=float, help='shift_loss_weight')
parser.add_argument('--cor_loss_weight', default='0.001', type=float, help='cor_loss_weight')
parser.add_argument('--M', default='5', type=int, help='multilayer')
parser.add_argument('--N', default='1', type=int, help='multilayer')
parser.add_argument('--evaluate', action='store_true', help='evaluate the model')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--save', default='', type=str, help='save_prefix')
args = parser.parse_args()
import logging.config
def setup_logging(log_file='log.txt'):
"""Setup logging configuration
"""
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
filename=log_file,
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
import time
timestr = time.strftime("%Y%m%d-%H%M%S")
args.save = args.save + timestr
# + 'M_is_' + str(args.M) + '__N_is_' + str(args.N) + '__' + "alpha_gap_" + str(args.alpha_gap) + '__' +
os.mkdir(args.save)
setup_logging(os.path.join(args.save, 'log.txt'))
writer = SummaryWriter(os.path.join(args.save, 'graph_logs'))
def save_checkpoint(state, is_best, filename):
torch.save(state, filename + '/checkpoint.pth.tar')
if is_best:
shutil.copyfile(filename + '/checkpoint.pth.tar', filename + '/model_best.pth.tar')
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(trainloader):
# process the weights including binarization
bin_op.binarization()
# forwarding
data, target = Variable(data.cuda()), Variable(target.cuda())
optimizer.zero_grad()
output = model(data)
# backwarding
# print(-bin_op.target_alpha[0])
# print(torch.Tensor([0]))
################################################################################
######### alpha loss ##########
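        # Reading of the hinge-style terms built below: each basis scale alpha[i]
        # is pushed below args.alpha_gap * alpha[i-1] (enforcing decreasing scales),
        # and every alpha is also pushed to stay non-negative.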
alpha_loss = Variable(torch.Tensor([0]), requires_grad=True).cuda()
if args.alpha_loss_weight:
# logging.info('==> using cor loss ...')
for index in range(0, bin_op.num_of_params, bin_op.M):
alpha=[]
for i in range(bin_op.M):
weight = bin_op.target_modules[index]
n = weight.data[0].nelement()
s = weight.data.size()
if len(s) == 4:
m = weight.norm(1, 3, keepdim=True)\
.sum(2, keepdim=True).sum(1, keepdim=True).div(n)
elif len(s) == 2:
m = weight.norm(1, 1, keepdim=True).div(n)
index=index+1
alpha.append(m)
for i in range(1, bin_op.M):
alpha_loss = alpha_loss + torch.sum(torch.max(alpha[i] - alpha[i-1]*args.alpha_gap, torch.Tensor([0]).cuda()))
for i in range(bin_op.M):
alpha_loss = alpha_loss + torch.sum(torch.max(-alpha[i], torch.Tensor([0]).cuda()))
######### alpha loss end ##########
# print(alpha_loss)
# beta_loss = 0
# for i in range(1, self.N):
# beta_loss = beta_loss + max(model.beta[i-1]*0.5 - model.beta[i], 0)
shift_loss = Variable(torch.Tensor([0]), requires_grad=True).cuda()
if(args.shift_loss_weight):
# logging.info('==> using shift loss ...')
for index in range(0, len(bin_op.target_shift), bin_op.N):
for i in range(1, args.N):
shift_loss = shift_loss + torch.max(bin_op.target_shift[index + i - 1] + args.shift_gap - bin_op.target_shift[index + i], torch.Tensor([0]).cuda())
################################################################################
######### correlation loss ##########
corloss = Variable(torch.Tensor([0]), requires_grad=True).cuda()
if(args.cor_loss_weight):
for i in range(0, bin_op.num_of_params, args.M):
binweights = [bin_op.target_modules[i].view(-1,1)]
for j in range(i+1,i+args.M):
# print(binweights)
binweights.append(bin_op.target_modules[j].view(-1,1))
binweights = torch.cat(binweights,dim=1)
# print(a.size())
corloss = corloss + CDbinlosses.CorrelationPenaltyLoss()(binweights)
######### correlation loss end ##########
accloss = criterion(output, target)
loss = accloss + args.alpha_loss_weight * alpha_loss + \
args.shift_loss_weight * shift_loss + args.cor_loss_weight * corloss
# loss = Variable(torch.Tensor([0]), requires_grad=True).cuda()
# print(alpha_loss)
loss.backward()
# print(bin_op.target_modules[0].grad)
# restore weights
bin_op.restore()
bin_op.updateBinaryGradWeight()
# print(model.module.bconv2.conv[0].weight.grad)
# print(model.module.conv1.weight.grad)
optimizer.step()
if batch_idx % 100 == 0:
logging.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tshiftLoss: {:.6f}\talphaLoss: {:.6f}\tLR: {}'.format(
epoch, batch_idx * len(data), len(trainloader.dataset),
100. * batch_idx / len(trainloader), #loss.data.item(),
accloss.item(), shift_loss.item(), alpha_loss.item(),
optimizer.param_groups[0]['lr']))
writer.add_scalar('Train/Loss', accloss.item(), epoch)
writer.add_scalar('Train/shiftLoss', shift_loss.item(), epoch)
writer.add_scalar('Train/CorLoss', corloss.item(), epoch)
writer.add_scalar('Train/alphaLoss', alpha_loss.item(), epoch)
# writer.add_scalar('Train/Acc', 100. * correct.item() / total, epoch)
return
def test(arch):
global best_acc
model.eval()
test_loss = 0
correct = 0
bin_op.binarization()
for data, target in testloader:
data, target = Variable(data.cuda()), Variable(target.cuda())
output = model(data)
test_loss += criterion(output, target).item()
# criterion(output, target).data.item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
bin_op.restore()
acc = 100. * float(correct) / len(testloader.dataset)
is_best = acc > best_acc
best_acc = max(acc, best_acc)
# save_state(model, best_acc, arch, filename = args.save)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
}, is_best, args.save)
test_loss /= len(testloader.dataset)
logging.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
test_loss, correct, len(testloader.dataset),
100. * float(correct) / len(testloader.dataset)))
logging.info('Best Accuracy: {:.2f}%\n'.format(best_acc))
writer.add_scalar('Test/Loss', test_loss, epoch)
writer.add_scalar('Test/Acc', 100. * float(correct) / len(testloader.dataset), epoch)
return
def adjust_learning_rate(optimizer, epoch):
update_list = [70, 140, 210, 280]
if epoch in update_list:
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * 0.1
return
if __name__=='__main__':
# prepare the options
args.gpus = [int(i) for i in args.gpus.split(',')]
torch.cuda.set_device(args.gpus[0])
logging.info("using gpu ")
logging.info(torch.cuda.current_device())
logging.info('==> Options:')
logging.info(args)
# set the seed
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# prepare the data
if not os.path.isfile(args.data+'/train_data'):
# check the data path
raise Exception\
('Please assign the correct data path with --data <DATA_PATH>')
# define classes
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# define the model
logging.info('==> building model' + args.arch + '...')
if args.arch == 'nin':
model = nin.Net()
elif args.arch == 'resnet':
model = xnor_resnet.resnet(**{'dataset': 'cifar10', 'num_classes': 10, 'depth': 18})
elif args.arch == 'alexnet':
model = alexnet.alexnet()
default_transform = {
'train': get_transform('cifar10',
input_size=32, augment=True),
'eval': get_transform('cifar10',
input_size=32, augment=False)
}
transform = getattr(model, 'input_transform', default_transform)
regime = getattr(model, 'regime', {0: {'optimizer': 'SGD',
'lr': 0.01,
'momentum': 0.9,
'weight_decay': 0}})
# define loss function (criterion) and optimizer
criterion = getattr(model, 'criterion', nn.CrossEntropyLoss)()
elif args.arch == 'vgg_like':
model = binary_connect_network.vgg_like()
default_transform = {
'train': get_transform('cifar10',
input_size=32, augment=True),
'eval': get_transform('cifar10',
input_size=32, augment=False)
}
transform = getattr(model, 'input_transform', default_transform)
regime = getattr(model, 'regime', {0: {'optimizer': 'SGD',
'lr': 0.01,
'momentum': 0.9,
'weight_decay': 0}})
# define loss function (criterion) and optimizer
criterion = getattr(model, 'criterion', nn.CrossEntropyLoss)()
elif args.arch == 'vgg_like_multilayer':
model = binary_connect_network_multilayer_V2.vgg_like_multilayer(M=args.M, N=args.N, num_classes=10)
default_transform = {
'train': get_transform('cifar10',
input_size=32, augment=True),
'eval': get_transform('cifar10',
input_size=32, augment=False)
}
transform = getattr(model, 'input_transform', default_transform)
regime = getattr(model, 'regime', {0: {'optimizer': 'SGD',
'lr': 0.01,
'momentum': 0.9,
'weight_decay': 0}})
# define loss function (criterion) and optimizer
criterion = getattr(model, 'criterion', nn.CrossEntropyLoss)()
else:
raise Exception(args.arch+' is currently not supported')
val_data = get_dataset(args.dataset, 'val', transform['eval'])
testloader = torch.utils.data.DataLoader(
val_data,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
train_data = get_dataset(args.dataset, 'train', transform['train'])
trainloader = torch.utils.data.DataLoader(
train_data,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
# initialize the model
# if not args.pretrained:
logging.info('==> Initializing model parameters ...')
best_acc = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.05)
if m.bias is not None:
m.bias.data.zero_()
# else:
# logging.info('==> Load pretrained model form', args.pretrained, '...')
# pretrained_model = torch.load(args.pretrained)
# best_acc = pretrained_model['best_acc']
# model.load_state_dict(pretrained_model['state_dict'])
bin_op = util.BinOp_V2(model, args.M, args.N, args.pretrained)
if not args.cpu:
logging.info('==> using gpu ...')
model.cuda()
model = torch.nn.DataParallel(model, device_ids=args.gpus)
logging.info(model)
# define solver and criterion
base_lr = float(args.lr)
param_dict = dict(model.named_parameters())
params = []
for key, value in param_dict.items():
if ('shift' in key):
params += [{'params': [value], 'lr': base_lr*0,
'weight_decay': 0}]
else:
params += [{'params':[value], 'lr': base_lr,
'weight_decay':0.00001}]
optimizer = optim.Adam(params, lr=base_lr,weight_decay=0.00001)
args.start_epoch=1
if args.pretrained:
bin_op.binarization_pre()
bin_op.restore()
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc = checkpoint['best_acc']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
del checkpoint
else:
print("=> no checkpoint found at '{}'".format(args.resume))
criterion = nn.CrossEntropyLoss()
# define the binarization operator
# do the evaluation if specified
if args.evaluate:
        test(args.arch)
exit(0)
if(args.cor_loss_weight):
logging.info("using cor loss")
# start training
for epoch in range(args.start_epoch, 280):
adjust_learning_rate(optimizer, epoch)
train(epoch)
test(args.arch)
writer.close()
os.system("python2 sentemail.py '" + args.save + "with acc as: " + str(best_acc) + "'")
```
#### File: DGRL/models/binary_connect_network_real.py
```python
import torch
import os
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torchvision.transforms as transforms
import math
__all__ = ['VGG_like', 'vgg_like']
class BinActive(torch.autograd.Function):
    '''
    Binarize the input activations with the sign function.
    '''
    def forward(self, input):
        self.save_for_backward(input)
        return input.sign()
def backward(self, grad_output):
input, = self.saved_tensors
grad_input = grad_output.clone()
grad_input[input.ge(1)] = 0
grad_input[input.le(-1)] = 0
return grad_input
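# The backward pass above is a straight-through estimator: gradients flow
# through sign() unchanged, except where |input| >= 1, where they are zeroed.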
class RealConv2d_binact(nn.Module): # change the name of RealConv2d
def __init__(self, input_channels, output_channels, M=5, N=1,
kernel_size=-1, stride=-1, padding=-1, groups=1, dropout=0,
Linear=False):
super(RealConv2d_binact, self).__init__()
self.layer_type = 'RealConv2d'
self.M = M
self.N = N
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dropout_ratio = dropout
if dropout != 0:
self.dropout = nn.Dropout(dropout)
self.Linear = Linear
if not self.Linear:
self.bn = nn.BatchNorm2d(input_channels, eps=1e-4, momentum=0.1, affine=True)
self.conv = nn.Conv2d(input_channels, output_channels,
kernel_size=kernel_size, stride=stride, padding=padding, groups=groups)
else:
self.bn = nn.BatchNorm1d(input_channels, eps=1e-4, momentum=0.1, affine=True)
self.linear = nn.Linear(input_channels, output_channels)
self.beta = nn.ParameterList([])
for i in range(self.N):
self.beta.append(nn.Parameter(torch.Tensor([1])))
self.shift = nn.ParameterList([])
a = -1
if self.N==5:
a=-3
for i in range(self.N):
self.shift.append(nn.Parameter(torch.Tensor([a])))
a = a + 1
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.bn(x)
dpout = BinActive()(x + self.shift[0]) * self.beta[0]
for i in range(1, self.N):
dpout = dpout + BinActive()(x + self.shift[i]) * self.beta[i]
if self.dropout_ratio != 0:
dpout = self.dropout(dpout)
if not self.Linear:
convout = self.conv(dpout)
else:
convout = self.linear(dpout)
convout = self.relu(convout)
return convout
class Vgg_like_real_binact(nn.Module):
def __init__(self, num_classes=10, M=5, N=1):
super(Vgg_like_real_binact, self).__init__()
self.num_classes = num_classes
self.M = M
self.N = N
self.conv1 = nn.Conv2d(3, 128, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(128, eps=1e-4, momentum=0.1, affine=True)
self.relu1 = nn.ReLU(inplace=True)
self.bconv2 = RealConv2d_binact(128, 128, M=self.M, N=self.N, kernel_size=3, stride=1, padding=1)
self.mpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bconv3 = RealConv2d_binact(128, 256, M=self.M, N=self.N, kernel_size=3, stride=1, padding=1)
self.bconv4 = RealConv2d_binact(256, 256, M=self.M, N=self.N, kernel_size=3, stride=1, padding=1)
self.mpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bconv5 = RealConv2d_binact(256, 512, M=self.M, N=self.N, kernel_size=3, stride=1, padding=1)
self.bconv6 = RealConv2d_binact(512, 512, M=self.M, N=self.N, kernel_size=3, stride=1, padding=1)
self.mpool6 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc7 = RealConv2d_binact(512 * 4 * 4, 1024, M=self.M, N=self.N, Linear=True)
self.fc8 = RealConv2d_binact(1024, 1024, M=self.M, N=self.N, dropout=0.5, Linear=True)
self.bn8 = nn.BatchNorm1d(1024, eps=1e-3, momentum=0.1, affine=True)
self.dp8 = nn.Dropout()
self.fc9 = nn.Linear(1024, num_classes)
self.regime = {
0: {'optimizer': 'SGD', 'lr': 1e-2,
'weight_decay': 5e-4, 'momentum': 0.9},
10: {'lr': 5e-3},
15: {'lr': 1e-3, 'weight_decay': 0},
20: {'lr': 5e-4},
25: {'lr': 1e-4}
}
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def forward(self, input):
conv1out = self.conv1(input)
bn1out = self.bn1(conv1out)
relu1out = self.relu1(bn1out)
bconv2out = self.bconv2(relu1out)
mpool2out = self.mpool2(bconv2out)
bconv3out = self.bconv3(mpool2out)
bconv4out = self.bconv4(bconv3out)
mpool4out = self.mpool4(bconv4out)
bconv5out = self.bconv5(mpool4out)
bconv6out = self.bconv6(bconv5out)
mpool6out = self.mpool6(bconv6out)
mpool6out = mpool6out.view(mpool6out.size(0), 512 * 4 * 4)
fc7out = self.fc7(mpool6out)
fc8out = self.fc8(fc7out)
bn8out = self.bn8(fc8out)
dp8out = self.dp8(bn8out)
fc9out = self.fc9(dp8out)
return fc9out#, bn1out, bconv2out, bconv3out, bconv4out, bconv5out, bconv6out, fc7out, fc8out
def vgg_like_real_binact(**kwargs):
"""model architecture from the
Binary Connect: VGG like network, in the BNN code, it's name VGG
"""
model = Vgg_like_real_binact(**kwargs)
# if pretrained:
# model_path = 'model_list/alexnet.pth.tar'
# pretrained_model = torch.load(model_path)
# model.load_state_dict(pretrained_model['state_dict'])
return model
class RealConv2d(nn.Module): # change the name of RealConv2d
def __init__(self, input_channels, output_channels,
kernel_size=-1, stride=-1, padding=-1, groups=1, dropout=0,
Linear=False):
super(RealConv2d, self).__init__()
self.layer_type = 'RealConv2d'
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dropout_ratio = dropout
if dropout!=0:
self.dropout = nn.Dropout(dropout)
self.Linear = Linear
if not self.Linear:
self.bn = nn.BatchNorm2d(input_channels, eps=1e-4, momentum=0.1, affine=True)
self.conv = nn.Conv2d(input_channels, output_channels,
kernel_size=kernel_size, stride=stride, padding=padding, groups=groups)
else:
self.bn = nn.BatchNorm1d(input_channels, eps=1e-4, momentum=0.1, affine=True)
self.linear = nn.Linear(input_channels, output_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.bn(x)
# x = BinActive()(x)
if self.dropout_ratio!=0:
x = self.dropout(x)
if not self.Linear:
x = self.conv(x)
else:
x = self.linear(x)
x = self.relu(x)
return x
class VGG_like(nn.Module):
def __init__(self, num_classes=10):
super(VGG_like, self).__init__()
self.num_classes = num_classes
self.conv1 = nn.Conv2d(3, 128, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(128, eps=1e-4, momentum=0.1, affine=True)
self.relu1 = nn.ReLU(inplace=True)
self.bconv2 = RealConv2d(128, 128, kernel_size=3, stride=1, padding=1)
self.mpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bconv3 = RealConv2d(128, 256, kernel_size=3, stride=1, padding=1)
self.bconv4 = RealConv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.mpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bconv5 = RealConv2d(256, 512, kernel_size=3, stride=1, padding=1)
self.bconv6 = RealConv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.mpool6 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc7 = RealConv2d(512 * 4 * 4, 1024, Linear=True)
self.fc8 = RealConv2d(1024, 1024, dropout=0.5, Linear=True)
self.bn8 = nn.BatchNorm1d(1024, eps=1e-3, momentum=0.1, affine=True)
self.dp8 = nn.Dropout()
self.fc9 = nn.Linear(1024, num_classes)
self.regime = {
0: {'optimizer': 'SGD', 'lr': 1e-2,
'weight_decay': 5e-4, 'momentum': 0.9},
10: {'lr': 5e-3},
15: {'lr': 1e-3, 'weight_decay': 0},
20: {'lr': 5e-4},
25: {'lr': 1e-4}
}
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# self.input_transform = {
# 'train': transforms.Compose([
# transforms.Scale(256),
# transforms.RandomCrop(224),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# normalize
# ]),
# 'eval': transforms.Compose([
# transforms.Scale(256),
# transforms.CenterCrop(224),
# transforms.ToTensor(),
# normalize
# ])
# }
def forward(self, input):
conv1out = self.conv1(input)
bn1out = self.bn1(conv1out)
relu1out = self.relu1(bn1out)
bconv2out = self.bconv2(relu1out)
mpool2out = self.mpool2(bconv2out)
bconv3out = self.bconv3(mpool2out)
bconv4out = self.bconv4(bconv3out)
mpool4out = self.mpool4(bconv4out)
bconv5out = self.bconv5(mpool4out)
bconv6out = self.bconv6(bconv5out)
mpool6out = self.mpool6(bconv6out)
mpool6out = mpool6out.view(mpool6out.size(0), 512 * 4 * 4)
fc7out = self.fc7(mpool6out)
fc8out = self.fc8(fc7out)
bn8out = self.bn8(fc8out)
dp8out = self.dp8(bn8out)
fc9out = self.fc9(dp8out)
return fc9out#, bn1out, bconv2out, bconv3out, bconv4out, bconv5out, bconv6out, fc7out, fc8out
def vgg_like(**kwargs):
"""model architecture from the
Binary Connect: VGG like network, in the BNN code, it's name VGG
"""
model = VGG_like(**kwargs)
# if pretrained:
# model_path = 'model_list/alexnet.pth.tar'
# pretrained_model = torch.load(model_path)
# model.load_state_dict(pretrained_model['state_dict'])
return model
```
#### File: JM-IP/DGRL/multibin_layer.py
```python
import numpy as np
import tensorflow as tf
from tensorflow import keras
import larq as lq
class Multibin(keras.layers.Layer):
    """Sums the outputs of M parallel binary convolutions."""
    def __init__(self, out_channel, kernel_size, M=1, **kwargs):
        self.out_channel = out_channel
        self.kernel_size = kernel_size
        self.M = M
        super(Multibin, self).__init__(**kwargs)
    def build(self, input_shape):
        # Create M trainable quantized conv branches; the binarization settings
        # below are larq's standard ones (assumed -- the original forwarded **kwargs).
        self.branches = [
            lq.layers.QuantConv2D(self.out_channel, self.kernel_size, padding="same",
                                  input_quantizer="ste_sign", kernel_quantizer="ste_sign",
                                  kernel_constraint="weight_clip")
            for _ in range(self.M)]
        # Be sure to call this at the end
        super(Multibin, self).build(input_shape)
    def call(self, inputs):
        outputs = self.branches[0](inputs)
        for i in range(1, self.M):
            outputs = outputs + self.branches[i](inputs)
        return outputs
    def get_config(self):
        base_config = super(Multibin, self).get_config()
        base_config.update({'out_channel': self.out_channel,
                            'kernel_size': self.kernel_size,
                            'M': self.M})
        return base_config
    @classmethod
    def from_config(cls, config):
        return cls(**config)
# dataset
data = np.random.random((1000, 32, 32, 3))
labels = np.random.random((1000, 10))
# Create a model using the custom layer
inputs = keras.Input(shape=(32, 32, 3))
out1 = Multibin(10, 3, M=2)(inputs)
pooled = keras.layers.GlobalAveragePooling2D()(out1)
out2 = keras.layers.Dense(10)(pooled)
final = keras.layers.Activation('softmax')(out2)
model = keras.Model(inputs, final)
# The compile step specifies the training configuration
model.compile(optimizer=keras.optimizers.RMSprop(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(data, labels, epochs=10, batch_size=32)
``` |
{
"source": "JM-IP/HNC",
"score": 3
} |
#### File: HNC_github/data/cifar10.py
```python
from importlib import import_module
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
from torchvision import transforms
def get_loader(args, kwargs):
norm_mean=[x/255.0 for x in [125.3, 123.0, 113.9]]
norm_std=[x/255.0 for x in [63.0, 62.1, 66.7]]
#norm_mean = [0.49139968, 0.48215827, 0.44653124]
#norm_std = [0.24703233, 0.24348505, 0.26158768]
loader_train = None
if not args.test_only:
transform_list = [
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)]
if not args.no_flip:
transform_list.insert(1, transforms.RandomHorizontalFlip())
transform_train = transforms.Compose(transform_list)
loader_train = DataLoader(
datasets.CIFAR10(
root=args.dir_data,
train=True,
download=True,
transform=transform_train),
batch_size=args.batch_size, shuffle=True, **kwargs
)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)])
loader_test = DataLoader(
datasets.CIFAR10(
root=args.dir_data,
train=False,
download=True,
transform=transform_test),
batch_size=500, shuffle=False, **kwargs
)
return loader_train, loader_test
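# Usage sketch (hypothetical args object with the fields referenced above):
#   loader_train, loader_test = get_loader(args, {'num_workers': 4, 'pin_memory': True})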
```
#### File: HNC_github/loss/__init__.py
```python
import os
import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from IPython import embed
class Loss(nn.modules.loss._Loss):
def __init__(self, args, ckp):
super(Loss, self).__init__()
print('Preparing loss function...')
self.args = args
self.n_GPUs = args.n_GPUs
self.loss = []
self.loss_module = nn.ModuleList()
for loss in args.loss.split('+'):
weight, loss_type = loss.split('*')
if loss_type == 'NLL':
loss_function = nn.NLLLoss()
elif loss_type == 'CE':
loss_function = nn.CrossEntropyLoss()
elif loss_type == 'MSE':
loss_function = nn.MSELoss()
else:
raise NotImplementedError('Loss function {} not implemented.'.format(loss_type))
self.loss.append({
'type': loss_type,
'weight': float(weight),
'function': loss_function
})
if len(self.loss) > 1:
self.loss.append({'type': 'Total', 'weight': 0, 'function': None})
print('Loss function:')
for l in self.loss:
if l['function'] is not None:
print('{:.3f} * {}'.format(l['weight'], l['type']))
self.loss_module.append(l['function'])
self.log_train = torch.Tensor()
self.log_test = torch.Tensor()
device = torch.device('cpu' if self.args.cpu else 'cuda')
self.loss_module.to(device)
if args.precision == 'half':
self.loss_module.half()
if not args.cpu and args.n_GPUs > 1:
self.loss_module = nn.DataParallel(self.loss_module, range(args.n_GPUs))
if args.load != '':
self.load(ckp.dir, cpu=args.cpu)
def forward(self, prediction, label, train=True):
losses = []
for i, l in enumerate(self.loss):
if l['function'] is not None:
# embed()
if prediction.dim() == 1:
prediction = prediction.unsqueeze(0)
loss = l['function'](prediction, label)
effective_loss = l['weight'] * loss
losses.append(effective_loss)
loss_sum = sum(losses)
topk = self.accuracy(prediction, label)
if train:
log = self.log_train
else:
log = self.log_test
# add the value to the loss log
log[-1, 0] += loss_sum.item() * prediction.size(0)
log[-1, 1] += topk[0]
log[-1, 2] += topk[1]
return loss_sum, topk
def accuracy(self, prediction, label):
topk = (1, 5)
_, pred = prediction.topk(max(topk), 1, largest=True, sorted=True)
correct = pred.eq(label.unsqueeze(-1))
res = []
for k in topk:
correct_k = correct[:, :k].float().sum()
res.append(100.0 * (prediction.size(0) - correct_k.item()))
return res
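    # Note: despite its name, accuracy() returns scaled *error* counts
    # (100 * number of top-k misses in the batch); end_log() later divides the
    # accumulated sums by the sample count, yielding top-1/top-5 error rates in %.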
def step(self):
for l in self.get_loss_module():
if hasattr(l, 'scheduler'): l.scheduler.step()
def start_log(self, train=True):
# append an empty row in the loss log
empty_log = torch.zeros(1, 3)
if train:
self.log_train = torch.cat((self.log_train, empty_log))
else:
self.log_test = torch.cat((self.log_test, empty_log))
def end_log(self, n_samples, train=True):
# average the loss log for all of the samples
if train:
self.log_train[-1].div_(n_samples)
else:
self.log_test[-1].div_(n_samples)
def display_loss(self, batch):
n_samples = batch + 1
log = []
        for l, c in zip(self.loss, self.log_train[-1]):  # self.log does not exist; use the training log
log.append('[{}: {:.4f}]'.format(l['type'], c / n_samples))
return ''.join(log)
def plot_loss(self, apath):
splits = ['Test']
logs = [self.log_test]
if not self.args.test_only:
if self.log_train.shape[0] > 0:
splits.append('Training')
logs.append(self.log_train)
for s, (split, log) in enumerate(zip(splits, logs)):
if s == 0:
if self.log_train.shape[0] < self.log_test.shape[0]:
axis = np.array(list(range(0, len(self.log_test))))
else:
axis = np.array(list(range(1, len(self.log_test) + 1)))
else:
axis = np.array(list(range(1, len(self.log_train) + 1)))
for i, measure in enumerate(('NLL', 'Top-1', 'Top-5')):
# axis = np.linspace(1, len(self.log_test), len(self.log_test))
# from IPython import embed; embed()
label = '{} ({})'.format(measure, split)
fig = plt.figure()
plt.title(label)
best = log[:, i].min()
plt.plot(
axis,
log[:, i].numpy(),
label='Best: {:.4f}'.format(best)
)
plt.legend()
plt.xlabel('Epochs')
if measure == 'NLL':
plt.ylabel('Loss')
else:
plt.ylabel('Error (%)')
plt.grid(True)
plt.savefig('{}/{}_{}.pdf'.format(apath, measure, split))
plt.close(fig)
def get_loss_module(self):
if self.n_GPUs == 1:
return self.loss_module
else:
return self.loss_module.module
def save(self, apath):
torch.save(self.state_dict(), os.path.join(apath, 'loss.pt'))
torch.save(self.log_train, os.path.join(apath, 'train_log.pt'))
torch.save(self.log_test, os.path.join(apath, 'test_log.pt'))
def load(self, apath, cpu=False):
kwargs = {}
if cpu:
kwargs = {'map_location': lambda storage, loc: storage}
self.load_state_dict(torch.load(os.path.join(apath, 'loss.pt'), **kwargs))
self.log_train = torch.load(os.path.join(apath, 'train_log.pt'))
self.log_test = torch.load(os.path.join(apath, 'test_log.pt'))
for l in self.get_loss_module():
if hasattr(l, 'scheduler'):
                for _ in range(len(self.log_train)):  # self.log does not exist; step once per logged epoch
l.scheduler.step()
# vanilla knowledge distillation loss
def distillation(y, teacher_scores, T):
p = F.log_softmax(y/T, dim=1)
q = F.softmax(teacher_scores/T, dim=1)
l_kl = F.kl_div(p, q, reduction='sum') * (T**2) / y.shape[0]
return l_kl
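# Minimal usage sketch (hypothetical variable names, standard Hinton-style KD):
#   kd = distillation(student_logits, teacher_logits.detach(), T=4)
#   loss = F.cross_entropy(student_logits, target) + lambda_kd * kd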
# similarity preserving knowledge distillation loss
def similarity_preserving(student_feature, teacher_feature):
batch_size = student_feature.shape[0]
student_feature = student_feature.reshape(batch_size, -1)
teacher_feature = teacher_feature.reshape(batch_size, -1)
student_correlation = torch.matmul(student_feature, student_feature.t())
teacher_correlation = torch.matmul(teacher_feature, teacher_feature.t())
student_correlation = student_correlation / torch.norm(student_correlation, p=2, dim=1).unsqueeze(dim=1)
teacher_correlation = teacher_correlation / torch.norm(teacher_correlation, p=2, dim=1).unsqueeze(dim=1)
similarity = torch.mean((student_correlation - teacher_correlation) ** 2)
return similarity
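# Usage sketch (hypothetical names): given feature maps for the same batch,
#   sp = similarity_preserving(student_feat, teacher_feat.detach())
# penalizes mismatch between the two batch-wise similarity (Gram) matrices.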
```
#### File: HNC_github/misc/numpack.py
```python
import math
import numpy as np
import torch
def bitdecomp(x, n_bits):
mods = []
for _ in range(n_bits):
mods.append(x % 2)
        x = x // 2  # floor division keeps integer dtype (true division would break the bit math)
bitrep = torch.stack(mods, dim=-1).byte()
return bitrep
def bitrecon(x, b_source, b_target):
bitrep = x.view(b_source * len(x) // b_target, b_target)
exp = torch.ShortTensor([2**e for e in range(b_target)]).view(1, b_target)
recon = (bitrep.short() * exp).sum(dim=1).view(-1)
return recon
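# Worked example: bitdecomp(torch.tensor([3, 10]), 4) gives the LSB-first rows
# [1,1,0,0] and [0,1,0,1]; bitrecon of those 8 bits back at 8-bit width yields
# 1 + 2 + 32 + 128 = 163, i.e. both 4-bit values packed into a single byte.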
def numpack(n, n_bits):
flat = n.view(-1)
if len(flat) % 8 > 0:
flat = torch.cat((flat, flat.new_zeros(8 - len(flat) % 8)))
bitrep = bitdecomp(flat, n_bits)
uint8rep = bitrecon(bitrep, n_bits, 8)
return uint8rep.byte()
def unpack(p, n_bits, size=None):
bitrep = bitdecomp(p, 8)
recon = bitrecon(bitrep, 8, n_bits).short()
if size is not None:
nelements = np.prod(size)
recon = recon[:nelements].view(size)
return recon
if __name__ == '__main__':
idx_high = 128
a = torch.randint(low=0, high=idx_high, size=(4, 3)).long()
p = numpack(a, int(math.log2(idx_high)))
r = unpack(p, int(math.log2(idx_high)), a.size())
diff = (a.short() - r).float().norm()
print('Reconstruction error: {:.2f}'.format(diff))
```
#### File: HNC_github/model_dhp/flops_counter_dhp.py
```python
import torch
import torch.nn as nn
import numpy as np
from model_dhp.dhp_base import conv_dhp
# from IPython import embed
import model_dhp.parametric_quantization as PQ
from torch.autograd import Variable
import re
# Function to extract all the numbers from the given string
def getNumbers(s):
    return re.findall(r'[0-9]+', s)
def set_output_dimension(model, input_res):
assert type(input_res) is tuple, 'Please provide the size of the input image.'
assert len(input_res) >= 3, 'Input image should have 3 dimensions.'
feat_model = add_feature_dimension(model)
feat_model.eval().start_dimension_add()
device = list(feat_model.parameters())[-1].device
batch = torch.FloatTensor(1, *input_res).to(device)
_ = feat_model(batch)
feat_model.stop_dimension_add()
def get_flops_prune_only(model, cfg, pruned=True):
return get_flops_grad(model, cfg, init_flops=True, pruned=pruned).item()
def get_flops(model, cfg, init_flops=False, pruned=True):
return get_flops_grad(model, cfg, init_flops=init_flops, pruned=pruned).item()
def get_flops_grad(model, cfg, init_flops=False, pruned=True):
flops = 0
n_list = []
_, _, n_dict = PQ.network_size_activations(model, cfg)
if not init_flops:
if model.args.base == 'MobileNet' or model.args.base == 'MobileNetV2':
act_n = 32
for name, module in model.named_modules():
if is_supported_instance(module):
if isinstance(module, (conv_dhp)):
w_n = PQ.get_percision(module.quantize_w, cfg=cfg)
n_list.append(act_n * w_n)
act_n_name = name + '.a_quant.a_size'
act_n = n_dict[act_n_name]
if isinstance(module, (nn.Linear)):
try:
w_n = PQ.get_percision(module.quantize_w, cfg=cfg)
except AttributeError:
w_n = 32
n_list.append(act_n * w_n)
elif model.args.base == 'VGG':
act_n = 32
for name, module in model.named_modules():
if is_supported_instance(module):
if isinstance(module, (conv_dhp)):
w_n = PQ.get_percision(module.quantize_w, cfg=cfg)
n_list.append(act_n * w_n)
act_n_name = name + '.a_quant.a_size'
act_n = n_dict[act_n_name]
if isinstance(module, (nn.Linear)):
w_n = PQ.get_percision(module.quantize_w, cfg=cfg)
n_list.append(act_n * w_n)
elif '18' in model.args.base:
for name, module in model.named_modules():
if is_supported_instance(module):
if isinstance(module, (conv_dhp)):
if '0' in name:
n_list.append(PQ.get_percision(module.quantize_w, cfg=cfg)*32)
elif 'layer1' in name or 'downsample' in name:
act_n_name = name.replace('.layer1', '.act_out.a_size')
act_n_name = act_n_name.replace('.downsample', '.act_out.a_size')
s = int(getNumbers(act_n_name)[0])
# [int(s) for s in act_n_name.split() if s.isdigit()]
if s == 1:
act_n_name = act_n_name.replace('act_out', 'a_quant')
act_n_name = act_n_name.replace(str(s), str(s-1))
act_n = n_dict[act_n_name]
n_list.append(PQ.get_percision(module.quantize_w, cfg=cfg) * act_n)
elif 'layer2' in name:
act_n_name = name.replace('.layer2', '.layer1.a_quant.a_size')
act_n = n_dict[act_n_name]
n_list.append(PQ.get_percision(module.quantize_w, cfg=cfg) * act_n)
elif isinstance(module, (nn.Linear)):
try:
w_n = PQ.get_percision(module.quantize_w, cfg=cfg)
except AttributeError:
w_n = 32
n_list.append(n_dict['features.8.act_out.a_size'] * w_n)
else:
for name, module in model.named_modules():
if is_supported_instance(module):
if isinstance(module, (conv_dhp)):
if '0' in name:
n_list.append(PQ.get_percision(module.quantize_w, cfg=cfg)*32)
elif 'layer1' in name or 'downsample' in name:
act_n_name = name.replace('.layer1', '.act_out.a_size')
act_n_name = act_n_name.replace('.downsample', '.act_out.a_size')
s = int(getNumbers(act_n_name)[0])
# [int(s) for s in act_n_name.split() if s.isdigit()]
if s == 1:
act_n_name = act_n_name.replace('act_out', 'a_quant')
act_n_name = act_n_name.replace(str(s), str(s-1))
act_n = n_dict[act_n_name]
n_list.append(PQ.get_percision(module.quantize_w, cfg=cfg) * act_n)
elif 'layer2' in name:
act_n_name = name.replace('.layer2', '.layer1.a_quant.a_size')
act_n = n_dict[act_n_name]
n_list.append(PQ.get_percision(module.quantize_w, cfg=cfg) * act_n)
elif isinstance(module, (nn.Linear)):
try:
w_n = PQ.get_percision(module.quantize_w, cfg=cfg)
except AttributeError:
w_n = 32
n_list.append(32 * w_n)
# n_list.append(PQ.get_percision(module.quantize_w, cfg=cfg)*n_dict['features.9.act_out.a_size'])
# for i in n_list:
# if i.item()>32*32:
# raise('yjm is here',i.item())
# print('yjm is here',i.item())
i = 0
for name, module in model.named_modules():
if is_supported_instance(module):
if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d, conv_dhp)):
if not init_flops:
flops += conv_calc_flops_grad(module, n_list[i])
else:
flops += conv_calc_flops_grad(module, 32 * 32, pruned)
i += 1
            # TODO: shortcut calculation needs to be fixed!
elif isinstance(module, (nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6)):
flops += relu_calc_flops(module) * 32
# if isinstance(module, nn.ReLU):
# print(module)
elif isinstance(module, (nn.Linear)):
if not init_flops:
flops += linear_calc_flops(module, n_list[i])
else:
flops += linear_calc_flops(module, 32 * 32)
i += 1
elif isinstance(module, (nn.BatchNorm2d)):
flops += bn_calc_flops(module) * 32 * 32
return flops
def conv_calc_flops_grad(self, n_bits, pruned=True, model=None):
# Do not count bias addition
# batch_size = 1
output_dims = np.prod(self.__output_dims__)
kernel_dims = np.prod(self.kernel_size) if isinstance(self.kernel_size, tuple) else self.kernel_size ** 2
in_channels = self.in_channels_remain if hasattr(self, 'in_channels_remain') else self.in_channels
out_channels = self.out_channels_remain if hasattr(self, 'out_channels_remain') else self.out_channels
groups = self.groups_remain if hasattr(self, 'groups_remain') else self.groups
# groups = self.groups
if pruned:
in_channels_num = self.in_channels_num
out_channels_num = self.out_channels_num
else:
in_channels_num = in_channels
out_channels_num = out_channels
# if pruned:
# if not in_channels == in_channels_num:
# print(self)
# print('in_channels is ', in_channels, 'in_channels_num is ', in_channels_num)
# # raise()
# if not out_channels == out_channels_num:
# print(self)
# print('out_channels is ', out_channels, 'out_channels_num is ', out_channels_num)
# # raise()
filters_per_channel = out_channels_num / groups
conv_per_position_flops = kernel_dims * in_channels_num * filters_per_channel
active_elements_count = output_dims
overall_conv_flops = conv_per_position_flops * active_elements_count
    return overall_conv_flops * n_bits
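# In words: bit-ops = k^2 * (C_in / groups) * C_out * H_out * W_out * n_bits,
# where n_bits is the product of the weight and activation bit-widths.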
def get_parameters_prune_only(model, cfg):
return get_parameters(model, cfg, init_params=True)
def get_parameters(model, cfg, init_params=False):
parameters = 0
for module in model.modules():
if is_supported_instance(module):
if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d, nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6)):
for p in module.parameters():
parameters += p.nelement() * 32
elif isinstance(module, nn.Linear):
in_features = module.in_features_remain if hasattr(module, 'in_features_remain') else module.in_features
out_features = module.out_features_remain if hasattr(module, 'out_features_remain') else module.out_features
try:
n = PQ.get_percision(module.quantize_w, cfg).item()
except AttributeError:
n = 32
if not init_params:
parameters += in_features * out_features * n
else:
parameters += in_features * out_features * 32
if module.bias is not None:
if not init_params:
parameters += module.out_features * n
else:
parameters += in_features * out_features * 32
elif isinstance(module, (conv_dhp)):
in_channels = module.in_channels_remain if hasattr(module, 'in_channels_remain') else module.in_channels
out_channels = module.out_channels_remain if hasattr(module, 'out_channels_remain') else module.out_channels
groups = module.groups_remain if hasattr(module, 'groups_remain') else module.groups
n = PQ.get_percision(module.quantize_w, cfg).item()
if not init_params:
parameters += in_channels // groups * out_channels * module.kernel_size ** 2 * n
else:
parameters += in_channels // groups * out_channels * module.kernel_size ** 2 * 32
if module.bias is not None:
if not init_params:
parameters += out_channels * n
else:
parameters += out_channels * 32
elif isinstance(module, nn.BatchNorm2d):
if module.affine:
num_features = module.num_features_remain if hasattr(module, 'num_features_remain') else module.num_features
parameters += num_features * 2 * 32
return parameters
def add_feature_dimension(net_main_module):
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_dimension_add = start_dimension_add.__get__(net_main_module)
net_main_module.stop_dimension_add = stop_dimension_add.__get__(net_main_module)
return net_main_module
def start_dimension_add(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Activates the computation of mean flops consumption per image.
Call it before you run the network.
"""
self.apply(add_feat_dim_hook_function)
def stop_dimension_add(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
self.apply(remove_feat_dim_hook_function)
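# Typical usage of the two methods above (editor's sketch; `net` and
# `dummy_input` stand for a real model and a matching input batch):
#
#   net = add_feature_dimension(net)
#   net.start_dimension_add()
#   _ = net(dummy_input)       # hooks record __output_dims__ on supported modules
#   net.stop_dimension_add()   # remove the hooks once dimensions are recorded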
def add_feat_dim_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d, conv_dhp)):
handle = module.register_forward_hook(conv_feat_dim_hook)
elif isinstance(module, (nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6)):
handle = module.register_forward_hook(relu_feat_dim_hook)
elif isinstance(module, nn.Linear):
handle = module.register_forward_hook(linear_feat_dim_hook)
elif isinstance(module, nn.BatchNorm2d):
handle = module.register_forward_hook(bn_feat_dim_hook)
else:
raise NotImplementedError('FLOPs calculation is not implemented for class {}'.format(module.__class__.__name__))
module.__flops_handle__ = handle
def remove_feat_dim_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
# ---- Internal functions
def is_supported_instance(module):
if isinstance(module,
(
conv_dhp,
nn.Conv2d, nn.ConvTranspose2d,
nn.BatchNorm2d,
nn.Linear,
# nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6,
)):
if hasattr(module, '__exclude_complexity__'):
return False
else:
return True
return False
def conv_feat_dim_hook(module, input, output):
module.__output_dims__ = output.shape[2:]
def conv_calc_flops(self, nn):
# Do not count bias addition
batch_size = 1
output_dims = np.prod(self.__output_dims__)
kernel_dims = np.prod(self.kernel_size) if isinstance(self.kernel_size, tuple) else self.kernel_size ** 2
in_channels = self.in_channels_remain if hasattr(self, 'in_channels_remain') else self.in_channels
out_channels = self.out_channels_remain if hasattr(self, 'out_channels_remain') else self.out_channels
groups = self.groups_remain if hasattr(self, 'groups_remain') else self.groups
# groups = self.groups
filters_per_channel = out_channels // groups
conv_per_position_flops = kernel_dims * in_channels * filters_per_channel
active_elements_count = batch_size * output_dims
overall_conv_flops = conv_per_position_flops * active_elements_count
return int(overall_conv_flops)*nn
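# Worked example for conv_calc_flops (editor's addition). For a 3x3 convolution
# with in_channels=16, out_channels=32, groups=1 and a recorded 8x8 output map:
#   kernel_dims             = 9
#   filters_per_channel     = 32 // 1 = 32
#   conv_per_position_flops = 9 * 16 * 32 = 4608
#   active_elements_count   = 1 * 64 = 64
#   overall_conv_flops      = 4608 * 64 = 294912
# The result is then scaled by `nn`, the weight bit-width (note that this
# argument shadows the `torch.nn` module inside the function body).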
def relu_feat_dim_hook(module, input, output):
s = output.shape
module.__output_dims__ = s[2:]
module.__output_channel__ = s[1]
def relu_calc_flops(self):
batch = 1
# TODO: relu channels attr is deleted
channels = self.channels if hasattr(self, 'channels') else self.__output_channel__
active_elements_count = batch * np.prod(self.__output_dims__) * channels
# print(active_elements_count, id(self))
# print(self)
return int(active_elements_count)
def linear_feat_dim_hook(module, input, output):
if len(output.shape[2:]) == 2:
module.__additional_dims__ = 1
else:
module.__additional_dims__ = output.shape[1:-1]
def linear_calc_flops(self, nn):
# Do not count bias addition
batch_size = 1
in_features = self.in_features_remain if hasattr(self, 'in_features_remain') else self.in_features
out_features = self.out_features_remain if hasattr(self, 'out_features_remain') else self.out_features
linear_flops = batch_size * np.prod(self.__additional_dims__) * in_features * out_features
# print(self.in_features, in_features)
return int(linear_flops)*nn
def bn_feat_dim_hook(module, input, output):
module.__output_dims__ = output.shape[2:]
def bn_calc_flops(self):
# Do not count bias addition
batch = 1
output_dims = np.prod(self.__output_dims__)
channels = self.num_features_remain if hasattr(self, 'num_features_remain') else self.num_features
batch_flops = batch * channels * output_dims
# print(self.num_features, channels)
if self.affine:
batch_flops *= 2
return int(batch_flops)
```
#### File: HNC_github/model/__init__.py
```python
import os
from importlib import import_module
import torch
import torch.nn as nn
from IPython import embed
class Model(nn.Module):
def __init__(self, args, cfg, checkpoint):
super(Model, self).__init__()
print('Making model...')
self.args = args
self.ckp = checkpoint
self.crop = self.args.crop
self.device = torch.device('cpu' if self.args.cpu else 'cuda')
self.precision = self.args.precision
self.n_GPUs = self.args.n_GPUs
self.save_models = self.args.save_models
if self.args.model.find('DeepComp') >= 0:
dc_type = self.args.model.split('-')[-1]
module = import_module('model.deepcomp')
self.model = module.make_model(self.args, dc_type)
else:
print('Import Module')
module = import_module('model.' + self.args.model.lower())
self.model = module.make_model(args, cfg)
# if not next(self.model.parameters()).is_cuda:
self.model = self.model.to(self.device)
if self.args.precision == 'half': self.model = self.model.half()
if not self.args.cpu:
print('CUDA is ready!')
torch.cuda.manual_seed(self.args.seed)
if self.args.n_GPUs > 1:
if not isinstance(self.model, nn.DataParallel):
self.model = nn.DataParallel(self.model, range(self.args.n_GPUs))
# in the test phase of network pruning
# if self.args.model.lower().find('prune') >= 0 and self.args.test_only:
# self.get_model().merge_conv()
# not in the training phase of network pruning
# if not (self.args.model.lower().find('prune') >= 0 and not self.args.test_only and not self.args.load):
self.load(pretrain=self.args.pretrain, load=self.args.load, resume=self.args.resume, cpu=self.args.cpu)
for m in self.modules():
if hasattr(m, 'set_range'):
m.set_range()
if self.args.print_model:
print(self.get_model(), file=self.ckp.log_file)
print(self.get_model())
self.summarize(self.ckp)
def forward(self, x):
if self.crop > 1:
b, n_crops, c, h, w = x.size()
x = x.view(-1, c, h, w)
x = self.model(x)
if self.crop > 1: x = x.view(b, n_crops, -1).mean(1)
return x
def get_model(self):
if self.n_GPUs == 1:
return self.model
else:
return self.model.module
def state_dict(self, **kwargs):
return self.get_model().state_dict(**kwargs)
def save(self, apath, epoch, converging=False, is_best=False):
target = self.get_model().state_dict()
conditions = (True, is_best, self.save_models)
if converging:
names = ('converging_latest', 'converging_best', 'converging_{}'.format(epoch))
else:
names = ('latest', 'best', '{}'.format(epoch))
for c, n in zip(conditions, names):
if c:
torch.save(target, os.path.join(apath, 'model', 'model_{}.pt'.format(n)))
def load(self, pretrain='', load='', resume=-1, cpu=False):
if pretrain:
f = os.path.join(pretrain, 'model/model_latest.pt') if pretrain.find('.pt') < 0 else pretrain
print('Load pre-trained model from {}'.format(f))
elif load:
if resume == -1:
print('Load model after the last epoch')
resume = 'latest'
else:
print('Load model after epoch {}'.format(resume))
f = os.path.join(load, 'model', 'model_{}.pt'.format(resume))
else:
f = None
if f:
kwargs = {}
if cpu:
kwargs = {'map_location': lambda storage, loc: storage}
state = torch.load(f, **kwargs)
# delete_key = []
# for k2, v2 in state.items():
# if k2.find('latent_vector') >= 0:
# delete_key.append(k2)
# for k2 in delete_key:
# state.pop(k2)
# for (k1, v1), (k2, v2) in zip(self.state_dict().items(), state.items()):
# print(v1.shape, v2.shape)
# embed()
# state = state['state_dict']
self.get_model().load_state_dict(state, strict=True)
def begin(self, epoch, ckp):
self.train()
m = self.get_model()
if hasattr(m, 'begin'):
m.begin(epoch, ckp)
def log(self, ckp):
m = self.get_model()
if hasattr(m, 'log'): m.log(ckp)
def summarize(self, ckp):
ckp.write_log('# parameters: {:,}'.format(sum([p.nelement() for p in self.model.parameters()])))
kernels_1x1 = 0
kernels_3x3 = 0
kernels_others = 0
gen = (c for c in self.model.modules() if isinstance(c, nn.Conv2d))
for m in gen:
kh, kw = m.kernel_size
n_kernels = m.in_channels * m.out_channels
if kh == 1 and kw == 1:
kernels_1x1 += n_kernels
elif kh == 3 and kw == 3:
kernels_3x3 += n_kernels
else:
kernels_others += n_kernels
linear = sum([l.weight.nelement() for l in self.model.modules() if isinstance(l, nn.Linear)])
ckp.write_log('1x1: {:,}\n3x3: {:,}\nOthers: {:,}\nLinear:{:,}\n'.
format(kernels_1x1, kernels_3x3, kernels_others, linear), refresh=True)
# if self.args.debug:
# def _get_flops(conv, x, y):
# _, _, h, w = y.size()
# kh, kw = conv.kernel_size
# conv.flops = h * w *conv.in_channels * conv.out_channels * kh * kw
# conv.flops_original = conv.flops
#
# for m in self.model.modules():
# if isinstance(m, nn.Conv2d):
# m.register_forward_hook(_get_flops)
```
#### File: model/in_use/flops_counter.py
```python
import torch.nn as nn
import torch
import numpy as np
# from IPython import embed
def get_model_flops(model, input_res, print_per_layer_stat=True,
input_constructor=None):
assert type(input_res) is tuple, 'Please provide the size of the input image.'
assert len(input_res) >= 3, 'Input image should have 3 dimensions.'
flops_model = add_flops_counting_methods(model)
flops_model.eval().start_flops_count()
if input_constructor:
input = input_constructor(input_res)
_ = flops_model(**input)
else:
device = list(flops_model.parameters())[-1].device
batch = torch.FloatTensor(1, *input_res).to(device)
_ = flops_model(batch)
if print_per_layer_stat:
print_model_with_flops(flops_model)
flops_count = flops_model.compute_average_flops_cost()
flops_model.stop_flops_count()
return flops_count
def get_model_complexity_info(model, input_res, print_per_layer_stat=True, as_strings=True,
input_constructor=None):
assert type(input_res) is tuple
assert len(input_res) >= 3
flops_model = add_flops_counting_methods(model)
flops_model.eval().start_flops_count()
if input_constructor:
input = input_constructor(input_res)
_ = flops_model(**input)
else:
batch = torch.FloatTensor(1, *input_res)
_ = flops_model(batch)
if print_per_layer_stat:
print_model_with_flops(flops_model)
flops_count = flops_model.compute_average_flops_cost()
params_count = get_model_parameters_number(flops_model)
flops_model.stop_flops_count()
if as_strings:
return flops_to_string(flops_count), params_to_string(params_count)
return flops_count, params_count
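# Example usage (editor's sketch; assumes a torchvision model is available):
#
#   import torchvision.models as models
#   net = models.resnet18()
#   flops, params = get_model_complexity_info(net, (3, 224, 224),
#                                             print_per_layer_stat=False)
#   print(flops, params)  # two strings, e.g. of the form '... GMac' and '... M'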
def flops_to_string(flops, units='GMac', precision=2):
if units is None:
if flops // 10**9 > 0:
return str(round(flops / 10.**9, precision)) + ' GMac'
elif flops // 10**6 > 0:
return str(round(flops / 10.**6, precision)) + ' MMac'
elif flops // 10**3 > 0:
return str(round(flops / 10.**3, precision)) + ' KMac'
else:
return str(flops) + ' Mac'
else:
if units == 'GMac':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MMac':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KMac':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' Mac'
def params_to_string(params_num):
if params_num // 10 ** 6 > 0:
return str(round(params_num / 10 ** 6, 2)) + ' M'
elif params_num // 10 ** 3:
return str(round(params_num / 10 ** 3, 2)) + ' k'
else:
return str(params_num)
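# Quick sanity checks for the two formatters above (editor's addition):
#   flops_to_string(1.5 * 10 ** 9)  -> '1.5 GMac'   (default units='GMac')
#   flops_to_string(2500, None)     -> '2.5 KMac'   (auto-scaled when units=None)
#   params_to_string(3210000)       -> '3.21 M'
#   params_to_string(1234)          -> '1.23 k'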
def print_model_with_flops(model, units='GMac', precision=3):
total_flops = model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / getattr(model, '__batch_counter__', 1)  # batch-counter hooks are not defined in this file; default to 1
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
accumulated_flops_cost = self.accumulate_flops()
return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision),
'{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
self.original_extra_repr()])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
model.apply(add_extra_repr)
print(model)
model.apply(del_extra_repr)
def get_model_parameters_number(model):
params_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params_num
def add_flops_counting_methods(net_main_module):
# Add additional methods to the existing module object.
# This is done so that each bound function has access to the module
# instance through `self`.
# embed()
net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
net_main_module.reset_flops_count()
return net_main_module
def compute_average_flops_cost(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Returns the flops accumulated so far (the per-image cost when the network
was run on a single image).
"""
flops_sum = 0
for module in self.modules():
if is_supported_instance(module):
# print(module.__flops__)  # debug output, silenced
flops_sum += module.__flops__
return flops_sum
def start_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Activates the computation of mean flops consumption per image.
Call it before you run the network.
"""
self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Resets statistics computed so far.
"""
self.apply(add_flops_counter_variable_or_reset)
def add_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
if isinstance(module, (nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d)):
handle = module.register_forward_hook(conv_flops_counter_hook)
elif isinstance(module, (nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6)):
handle = module.register_forward_hook(relu_flops_counter_hook)
elif isinstance(module, nn.Linear):
handle = module.register_forward_hook(linear_flops_counter_hook)
elif isinstance(module, (nn.BatchNorm2d)):
handle = module.register_forward_hook(bn_flops_counter_hook)
else:
handle = module.register_forward_hook(empty_flops_counter_hook)
module.__flops_handle__ = handle
def remove_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
def add_flops_counter_variable_or_reset(module):
if is_supported_instance(module):
module.__flops__ = 0
# ---- Internal functions
def is_supported_instance(module):
if isinstance(module,
(
nn.Conv2d, nn.ConvTranspose2d,
# nn.BatchNorm2d,
nn.Linear,
# nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6,
)):
return True
return False
def conv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
# input = input[0]
batch_size = output.shape[0]
output_dims = list(output.shape[2:])
kernel_dims = list(conv_module.kernel_size)
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = out_channels // groups
conv_per_position_flops = np.prod(kernel_dims) * in_channels * filters_per_channel
active_elements_count = batch_size * np.prod(output_dims)
overall_conv_flops = conv_per_position_flops * active_elements_count
# overall_flops = overall_conv_flops
conv_module.__flops__ += int(overall_conv_flops)
# conv_module.__output_dims__ = output_dims
def relu_flops_counter_hook(module, input, output):
active_elements_count = output.numel()
module.__flops__ += int(active_elements_count)
# print(module.__flops__, id(module))
# print(module)
def linear_flops_counter_hook(module, input, output):
input = input[0]
if len(input.shape) == 1:
batch_size = 1
module.__flops__ += int(batch_size * input.shape[0] * output.shape[0])
else:
batch_size = input.shape[0]
module.__flops__ += int(batch_size * input.shape[1] * output.shape[1])
def bn_flops_counter_hook(module, input, output):
# input = input[0]
# TODO: need to check here
# batch_flops = np.prod(input.shape)
# if module.affine:
# batch_flops *= 2
# module.__flops__ += int(batch_flops)
batch = output.shape[0]
output_dims = output.shape[2:]
channels = module.num_features
batch_flops = batch * channels * np.prod(output_dims)
if module.affine:
batch_flops *= 2
module.__flops__ += int(batch_flops)
def empty_flops_counter_hook(module, input, output):
module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
output_size = output[0]
batch_size = output_size.shape[0]
output_elements_count = batch_size
for val in output_size.shape[1:]:
output_elements_count *= val
module.__flops__ += int(output_elements_count)
def pool_flops_counter_hook(module, input, output):
input = input[0]
module.__flops__ += int(np.prod(input.shape))
def dconv_flops_counter_hook(dconv_module, input, output):
input = input[0]
batch_size = input.shape[0]
output_dims = list(output.shape[2:])
m_channels, in_channels, kernel_dim1, _, = dconv_module.weight.shape
out_channels, _, kernel_dim2, _, = dconv_module.projection.shape
# groups = dconv_module.groups
# filters_per_channel = out_channels // groups
conv_per_position_flops1 = kernel_dim1 ** 2 * in_channels * m_channels
conv_per_position_flops2 = kernel_dim2 ** 2 * out_channels * m_channels
active_elements_count = batch_size * np.prod(output_dims)
overall_conv_flops = (conv_per_position_flops1 + conv_per_position_flops2) * active_elements_count
overall_flops = overall_conv_flops
dconv_module.__flops__ += int(overall_flops)
# dconv_module.__output_dims__ = output_dims
```
#### File: HNC_github/util/trainer_dhp.py
```python
from util import utility
import torch
import torch.nn as nn
from tqdm import tqdm
from model.in_use.flops_counter import get_model_flops
from model_dhp.flops_counter_dhp import get_flops, get_flops_prune_only, get_parameters_prune_only, \
get_parameters, set_output_dimension, get_flops_grad
from model_dhp.dhp_base import set_finetune_flag
from tensorboardX import SummaryWriter
import os
import matplotlib
from torch.autograd import Variable
import torch.nn.functional as F
import math
import model_dhp.parametric_quantization as PQ
matplotlib.use('Agg')
class Trainer():
def __init__(self, args, cfg, loader, my_model, my_loss=None, ckp=None, writer=None, converging=False):
self.args = args
self.ckp = ckp
self.loader_train = loader.loader_train
self.loader_test = loader.loader_test
self.model = my_model
self.loss = my_loss
self.cfg = cfg
self.var_weight = args.var_weight
self.reg_weight = args.reg_weight
self.writer = writer
if args.data_train.find('CIFAR') >= 0:
self.input_dim = (3, 32, 32)
elif args.data_train.find('Tiny_ImageNet') >= 0:
self.input_dim = (3, 64, 64)
else:
self.input_dim = (3, 224, 224)
# if converging:
# self.reset_after_searching()
#
set_output_dimension(self.model.get_model(), self.input_dim)
self.flops = get_flops(self.model.get_model(), cfg, init_flops=True, pruned=False)
self.flops_prune = self.flops # at initialization, no pruning is conducted.
self.flops_compression_ratio = self.flops_prune / self.flops
self.params = get_parameters(self.model.get_model(), cfg, init_params=True)
self.params_prune = self.params
self.params_compression_ratio = self.params_prune / self.params
self.flops_prune_only = get_flops(self.model.get_model(), cfg, init_flops=True, pruned=False)
self.flops_compression_ratio_prune_only = (self.flops_prune_only / self.flops)
self.params_prune_only = self.params
self.params_compression_ratio_prune_only = self.params_prune_only / self.params
self.flops_ratio_log = []
self.params_ratio_log = []
self.converging = converging
self.ckp.write_log('\nThe computation complexity and number of parameters of the current network are as follows.'
'\nFlops: {:.4f} [G]\tParams {:.2f} [k]'.format(self.flops / 10. ** 9,
self.params / 10. ** 3))
# self.flops_another = get_model_flops(self.model.get_model(), self.input_dim, False)
# self.ckp.write_log('Flops: {:.4f} [G] calculated by the original counter. \nMake sure that the two calculated '
# 'Flops are the same.\n'.format(self.flops_another / 10. ** 9))
self.optimizer = utility.make_optimizer_dhp(args, self.model, ckp=ckp, converging=converging)
self.scheduler = utility.make_scheduler_dhp(args, self.optimizer, args.decay.split('+')[0], converging=converging)
self.device = torch.device('cpu' if args.cpu else 'cuda')
if args.model.find('INQ') >= 0:
self.inq_steps = args.inq_steps
else:
self.inq_steps = None
def reset_after_searching(self):
# Phases 1 & 3: the model is reset here.
# Phases 2 & 4: the model is reset at initialization.
# In Phases 1 & 3, the optimizer and scheduler are reset.
# In Phase 2, the optimizer and scheduler are not used.
# In Phase 4, the optimizer and scheduler are already set during the initialization of the trainer.
# During the converging stage, self.converging = True, so there is no need to set lr_adjust_flag in
# make_optimizer_hinge and make_scheduler_hinge.
if not self.converging and not self.args.test_only:
self.model.get_model().reset_after_searching()
self.converging = True
del self.optimizer, self.scheduler
torch.cuda.empty_cache()
decay = self.args.decay if len(self.args.decay.split('+')) == 1 else self.args.decay.split('+')[1]
self.optimizer = utility.make_optimizer_dhp(self.args, self.model, converging=self.converging)
self.scheduler = utility.make_scheduler_dhp(self.args, self.optimizer, decay,
converging=self.converging)
self.flops_prune = get_flops(self.model.get_model(), self.cfg)
self.flops_compression_ratio = self.flops_prune / self.flops
self.params_prune = get_parameters(self.model.get_model(), self.cfg)
self.params_compression_ratio = self.params_prune / self.params
if not self.args.test_only and self.args.summary:
self.writer = SummaryWriter(os.path.join(self.args.dir_save, self.args.save), comment='converging')
if os.path.exists(os.path.join(self.ckp.dir, 'epochs.pt')):
self.epochs_searching = torch.load(os.path.join(self.ckp.dir, 'epochs.pt'))
def train(self):
epoch, lr = self.start_epoch()
self.model.begin(epoch, self.ckp)
self.loss.start_log()
timer_data, timer_model = utility.timer(), utility.timer()
# timer_back = utility.timer()
# timer_opt = utility.timer()
# timer_l1 = utility.timer()
# timer_setp = utility.timer()
# timer_ford = utility.timer()
n_samples = 0
for batch, (img, label) in enumerate(self.loader_train):
img, label = self.prepare(img, label)
n_samples += img.size(0)
timer_data.hold()
timer_model.tic()
self.optimizer.zero_grad()
# print('DEBUG: ============> model forward:')
prediction = self.model(img)
# print('DEBUG: ============> model forward end')
loss, _ = self.loss(prediction, label)
# timer_model.hold()
reg_loss = Variable(torch.Tensor([-1]).cuda(), requires_grad=False)
var_loss = Variable(torch.Tensor([-1]).cuda(), requires_grad=False)
# if self.var_weight>0:
# var_loss = 0
# for latent in self.model.get_model().gather_latent_vector()[2::]:
# x = F.normalize(latent.abs(), p=1, dim=0)
# var_loss -= torch.var(x)
# + self.var_weight * var_loss
# timer_back.tic()
# timer_back.hold()
if not self.converging:
# print('DEBUG: ============> model set parameters')
# timer_l1.tic()
self.model.get_model().proximal_operator(lr)
# if batch % self.args.compression_check_frequency == 0:
self.model.get_model().set_parameters()
# self.flops_prune = get_flops(self.model.get_model(), self.cfg)
# self.flops_compression_ratio = (self.flops_prune / self.flops)
# self.params_prune = get_parameters(self.model.get_model(), self.cfg)
# self.params_compression_ratio = self.params_prune / self.params
# self.flops_ratio_log.append(self.flops_compression_ratio)
# self.params_ratio_log.append(self.params_compression_ratio)
# self.flops_prune_only = get_flops_prune_only(self.model.get_model(), self.cfg,pruned=False)
# self.flops_compression_ratio_prune_only = (self.flops_prune_only / self.flops)
# if self.flops_compression_ratio_prune_only>1:
# raise('why % > 1')
# self.params_prune_only = get_parameters_prune_only(self.model.get_model(), self.cfg)
# self.params_compression_ratio_prune_only = self.params_prune_only / self.params
# if batch % 300 == 0:
# self.model.get_model().latent_vector_distribution(epoch, batch + 1, self.ckp.dir)
# self.model.get_model().per_layer_compression_ratio_quantize_precision(epoch, batch + 1, self.ckp.dir, cfg=self.cfg)
# timer_l1.hold()
# timer_setp.tic()
#
# timer_setp.hold()
# print('DEBUG: ============> model set parameters end')
t = self.args.ratio * self.flops
if self.args.gradually:
t = t * (10-epoch//10+1)/2
# t = t * (3-epoch//10+1)/2
reg_loss = torch.log(torch.max((get_flops_grad(self.model.get_model(), self.cfg)/t).cuda(),
Variable(torch.Tensor([1]).cuda(), requires_grad=False)))
(loss + self.reg_weight * reg_loss).backward()
# # VAR_LOSS V1
# for latent in self.model.get_model().gather_latent_vector()[1::]:
# var_loss += torch.var(latent)
# # VAR_LOSS V2
# for latent in self.model.get_model().gather_latent_vector()[1::]:
# var_loss -= torch.var(latent)
# VAR_LOSS V4
PQ.clip_quant_grads(self.model.get_model(), self.cfg)
# timer_opt.tic()
self.optimizer.step()
# timer_opt.hold()
# print('yjm is here to debug timeback is:', timer_model.release(), timer_back.release(), timer_opt.release(),
# timer_l1.release(), timer_setp.release())
PQ.clip_quant_vals(self.model.get_model(), self.cfg)
else:
loss.backward()
self.optimizer.step()
# proximal operator
if not self.converging:
if batch % self.args.compression_check_frequency == 0 or batch==0:
self.flops_prune = get_flops(self.model.get_model(), self.cfg)
self.flops_compression_ratio = (self.flops_prune / self.flops)
self.params_prune = get_parameters(self.model.get_model(), self.cfg)
self.params_compression_ratio = self.params_prune / self.params
self.flops_ratio_log.append(self.flops_compression_ratio)
self.params_ratio_log.append(self.params_compression_ratio)
self.flops_prune_only = get_flops_prune_only(self.model.get_model(), self.cfg,pruned=False)
self.flops_compression_ratio_prune_only = (self.flops_prune_only / self.flops)
self.params_prune_only = get_parameters_prune_only(self.model.get_model(), self.cfg)
self.params_compression_ratio_prune_only = self.params_prune_only / self.params
if batch % 300 == 0 or batch==0:
for name,v in self.model.get_model().named_modules():
# print(name)
if 'quantize' in name:
print("layer {}, \t\tw precision: {}, \tdelta: {}, \txmax: {}".format(
name, PQ.get_percision(v, self.cfg).item(), v.d.item(), v.xmax.item()))
if 'act_out.a_quant' in name or 'a_quant.a_quant' in name:
try:
print("layer {}, \tap precision: {}, \tdelta: {}, \txmax: {}".format(
name, PQ.get_percision_a(v, self.cfg).item(), v.d.item(), v.xmax.item()))
except AttributeError:  # the quantizer may not expose d/xmax yet
pass
else:
pass
self.model.get_model().latent_vector_distribution(epoch, batch + 1, self.ckp.dir)
self.model.get_model().per_layer_compression_ratio_quantize_precision(epoch, batch + 1, self.ckp.dir, cfg=self.cfg)
timer_model.hold()
if batch % self.args.print_every == 0 or batch==0:
self.ckp.write_log('{}/{} ({:.0f}%)\t'
'NLL: {:.3f}\tTop1: {:.2f} / Top5: {:.2f}\t'
'Params Loss: {:.3f}\t'
'Var Loss: {:.3f}\t'
'Time: {:.1f}+{:.1f}s\t'
'Flops Ratio: {:.2f}% = {:.4f} [G] / {:.4f} [G]\t'
'Params Ratio: {:.2f}% = {:.2f} [k] / {:.2f} [k]\t'
'Flops_prune_only Ratio: {:.2f}% = {:.4f} [G] / {:.4f} [G]\t'
'Params_prune_only Ratio: {:.2f}% = {:.2f} [k] / {:.2f} [k]'.format(
n_samples, len(self.loader_train.dataset), 100.0 * n_samples / len(self.loader_train.dataset),
*(self.loss.log_train[-1, :] / n_samples), reg_loss.item(), var_loss.item(),
timer_model.release(), timer_data.release(),
self.flops_compression_ratio * 100, self.flops_prune / 10. ** 9, self.flops / 10. ** 9,
self.params_compression_ratio * 100, self.params_prune / 10. ** 3, self.params / 10. ** 3,
self.flops_compression_ratio_prune_only * 100, self.flops_prune_only / 10. ** 9, self.flops / 10. ** 9,
self.params_compression_ratio_prune_only * 100, self.params_prune_only / 10. ** 3, self.params / 10. ** 3))
if self.args.summary:
if (batch + 1) % 50 == 0:
self.writer.add_scalar('regloss/', reg_loss.item(),
1000 * (epoch - 1) + batch)
for name, param in self.model.named_parameters():
if name.find('features') >= 0 and name.find('weight') >= 0:
self.writer.add_scalar('data/' + name, param.clone().cpu().data.abs().mean().numpy(),
1000 * (epoch - 1) + batch)
if param.grad is not None:
self.writer.add_scalar('data/' + name + '_grad',
param.grad.clone().cpu().data.abs().mean().numpy(),
1000 * (epoch - 1) + batch)
if (batch + 1) == 500:
self.writer.add_scalar('regloss/', reg_loss.item(),
1000 * (epoch - 1) + batch)
for name, param in self.model.named_parameters():
if name.find('features') >= 0 and name.find('weight') >= 0:
self.writer.add_histogram(name, param.clone().cpu().data.numpy(), 1000 * (epoch - 1) + batch)
if param.grad is not None:
self.writer.add_histogram(name + '_grad', param.grad.clone().cpu().data.numpy(),
1000 * (epoch - 1) + batch)
timer_data.tic()
if not self.converging:
self.model.get_model().set_parameters()
self.flops_prune = get_flops(self.model.get_model(), self.cfg)
self.flops_compression_ratio = (self.flops_prune / self.flops)
self.params_prune = get_parameters(self.model.get_model(), self.cfg)
self.params_compression_ratio = self.params_prune / self.params
self.flops_ratio_log.append(self.flops_compression_ratio)
self.params_ratio_log.append(self.params_compression_ratio)
self.flops_prune_only = get_flops_prune_only(self.model.get_model(), self.cfg,pruned=False)
self.flops_compression_ratio_prune_only = (self.flops_prune_only / self.flops)
self.params_prune_only = get_parameters_prune_only(self.model.get_model(), self.cfg)
self.params_compression_ratio_prune_only = self.params_prune_only / self.params
# if not self.converging and self.terminate():
# break
self.ckp.write_log('{}/{} ({:.0f}%)\t'
'NLL: {:.3f}\tTop1: {:.2f} / Top5: {:.2f}\t'
'Params Loss: {:.3f}\t'
'Var Loss: {:.3f}\t'
'Flops Ratio: {:.2f}% = {:.4f} [G] / {:.4f} [G]\t'
'Params Ratio: {:.2f}% = {:.2f} [k] / {:.2f} [k]\t'
'Flops_prune_only Ratio: {:.2f}% = {:.4f} [G] / {:.4f} [G]\t'
'Params_prune_only Ratio: {:.2f}% = {:.2f} [k] / {:.2f} [k]'.format(
n_samples, len(self.loader_train.dataset), 100.0 * n_samples / len(self.loader_train.dataset),
*(self.loss.log_train[-1, :] / n_samples),
reg_loss.item(), var_loss.item(), self.flops_compression_ratio * 100,
self.flops_prune / 10. ** 9, self.flops / 10. ** 9,
self.params_compression_ratio * 100,
self.params_prune / 10. ** 3, self.params / 10. ** 3,
self.flops_compression_ratio_prune_only * 100,
self.flops_prune_only / 10. ** 9, self.flops / 10. ** 9,
self.params_compression_ratio_prune_only * 100,
self.params_prune_only / 10. ** 3, self.params / 10. ** 3))
self.model.get_model().latent_vector_distribution(epoch, 1, self.ckp.dir)
self.model.get_model().per_layer_compression_ratio_quantize_precision(epoch, 1, self.ckp.dir, cfg=self.cfg)
self.model.log(self.ckp)  # TODO: why is this used?
self.loss.end_log(len(self.loader_train.dataset))
def test(self):
epoch = self.scheduler.last_epoch + 1
self.ckp.write_log('\nEvaluation:')
self.loss.start_log(train=False)
self.model.eval()
timer_test = utility.timer()
timer_test.tic()
with torch.no_grad():
for img, label in tqdm(self.loader_test, ncols=80):
img, label = self.prepare(img, label)
prediction = self.model(img)
self.loss(prediction, label, train=False)
self.loss.end_log(len(self.loader_test.dataset), train=False)
# Lower is better
best = self.loss.log_test.min(0)
for i, measure in enumerate(('Loss', 'Top1 error', 'Top5 error')):
self.ckp.write_log('{}: {:.3f} (Best: {:.3f} from epoch {})'.
format(measure, self.loss.log_test[-1, i], best[0][i], best[1][i] + 1))
if hasattr(self, 'epochs_searching') and self.converging:
best = self.loss.log_test[:self.epochs_searching, :].min(0)
self.ckp.write_log('\nBest during searching')
for i, measure in enumerate(('Loss', 'Top1 error', 'Top5 error')):
self.ckp.write_log('{}: {:.3f} from epoch {}'.format(measure, best[0][i], best[1][i]))
self.ckp.write_log('Time: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
is_best = self.loss.log_test[-1, self.args.top] <= best[0][self.args.top]
self.ckp.save(self, epoch, converging=self.converging, is_best=is_best)
self.ckp.save_results(epoch, self.model)
# scheduler.step is moved from training procedure to test procedure
self.scheduler.step()
def prepare(self, *args):
def _prepare(x):
x = x.to(self.device)
if self.args.precision == 'half':
x = x.half()
return x
return [_prepare(a) for a in args]
def start_epoch(self):
epoch = self.scheduler.last_epoch + 1
lr = self.scheduler.get_lr()[0]
if not self.converging:
stage = 'Searching Stage'
else:
stage = 'Converging Stage (Searching Epoch {})'.format(self.epochs_searching)
self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2}\t{}'.format(epoch, lr, stage))
return epoch, lr
def terminate(self):
if self.args.test_only:
self.test()
return True
else:
epoch = self.scheduler.last_epoch + 1
if self.converging:
return epoch >= self.args.epochs
else:
print("current ratio: ", self.flops_compression_ratio, " target ratio: ",self.args.ratio, "is end? : ",
((self.flops_compression_ratio - self.args.ratio) <= self.args.stop_limit or epoch > 99))
return ((self.flops_compression_ratio - self.args.ratio) <= self.args.stop_limit or epoch > 99) and epoch>=30
``` |
{
"source": "jmirabel/agimus-demos",
"score": 2
} |
#### File: talos/camera_calibration/convert_to_visp_yaml.py
```python
import time
import rosbag
import rospy
from geometry_msgs.msg import Transform as TransformROS
from hpp import Transform
from hpp.corbaserver.manipulation.robot import CorbaClient, Robot
def publishTransform(publisher, transform):
t = TransformROS()
t.rotation.x = transform.quaternion.array[0]
t.rotation.y = transform.quaternion.array[1]
t.rotation.z = transform.quaternion.array[2]
t.rotation.w = transform.quaternion.array[3]
t.translation.x = transform.translation[0]
t.translation.y = transform.translation[1]
t.translation.z = transform.translation[2]
publisher.publish(t)
client = CorbaClient()
bag = rosbag.Bag("/usr/localDev/rosbag-calib/pyrene-calib.bag")
Robot.packageName = "agimus_demos"
Robot.urdfName = "talos"
Robot.urdfSuffix = "_calibration_camera"
Robot.srdfSuffix = ""
robot = Robot("talos", "talos", rootJointType="freeflyer", client=client)
q = robot.getCurrentConfig()
pub_cMo = rospy.Publisher("/camera_object", TransformROS, queue_size=100)
pub_fMe = rospy.Publisher("/world_effector", TransformROS, queue_size=100)
rospy.init_node("pose_sender", anonymous=True)
i = 0
for (_, joint_states, _), (_, checkerboard_pose, _) in zip(
bag.read_messages(topics=["joints"]), bag.read_messages(topics=["chessboard"])
):
root_joint_rank = robot.rankInConfiguration["talos/root_joint"]
q[root_joint_rank : root_joint_rank + 7] = [0, 0, 1, 0, 0, 0, 1]
joints_name_value_tuple = zip(joint_states.name, joint_states.position)
for name, value in joints_name_value_tuple:
joint_name = "talos/" + name
q[robot.rankInConfiguration[joint_name]] = value
robot.setCurrentConfig(q)
gripper = Transform(robot.getJointPosition("talos/gripper_left_base_link_joint"))
camera = Transform(robot.getJointPosition("talos/rgbd_rgb_optical_joint"))
fMe = gripper.inverse() * camera
cMo = Transform(
[
checkerboard_pose.pose.position.x,
checkerboard_pose.pose.position.y,
checkerboard_pose.pose.position.z,
checkerboard_pose.pose.orientation.x,
checkerboard_pose.pose.orientation.y,
checkerboard_pose.pose.orientation.z,
checkerboard_pose.pose.orientation.w,
]
)
publishTransform(pub_cMo, cMo)
publishTransform(pub_fMe, fMe)
i += 1
time.sleep(0.05)
```
#### File: talos/pickup_cardboard_box/common_hpp.py
```python
from hpp.corbaserver.manipulation import ConstraintGraph, ProblemSolver, Rule
from hpp.corbaserver.manipulation.robot import Robot
from hpp.gepetto.manipulation import ViewerFactory
Robot.packageName = "talos_data"
Robot.urdfName = "talos"
Robot.urdfSuffix = "_full_v2"
Robot.srdfSuffix = ""
class Box(object):
rootJointType = "freeflyer"
packageName = "gerard_bauzil"
urdfName = "cardboard_box"
urdfSuffix = ""
srdfSuffix = ""
handles = ["box/handle1", "box/handle2"]
contacts = ["box/bottom_surface"]
class Table(object):
rootJointType = "anchor"
packageName = "gerard_bauzil"
urdfName = "pedestal_table"
urdfSuffix = ""
srdfSuffix = ""
pose = "pose"
contacts = ["table/support"]
Object = Box
half_sitting = [
# -0.74,0,1.0192720229567027,0,0,0,1, # root_joint
-0.6,
-0.2,
1.0192720229567027,
0,
0,
0,
1, # root_joint
0.0,
0.0,
-0.411354,
0.859395,
-0.448041,
-0.001708, # leg_left
0.0,
0.0,
-0.411354,
0.859395,
-0.448041,
-0.001708, # leg_right
0,
0.006761, # torso
0.25847,
0.173046,
-0.0002,
-0.525366,
0,
0,
0.1, # arm_left
0,
0,
0,
0,
0,
0,
0, # gripper_left
-0.25847,
-0.173046,
0.0002,
-0.525366,
0,
0,
0.1, # arm_right
0,
0,
0,
0,
0,
0,
0, # gripper_right
0,
0, # head
-0.04,
0,
1.095 + 0.071,
0,
0,
1,
0, # box
]
def makeRobotProblemAndViewerFactory(clients):
robot = Robot("talos", "talos", rootJointType="freeflyer", client=clients)
robot.leftAnkle = "talos/leg_left_6_joint"
robot.rightAnkle = "talos/leg_right_6_joint"
robot.setJointBounds("talos/root_joint", [-1, 1, -1, 1, 0.5, 1.5])
ps = ProblemSolver(robot)
ps.setRandomSeed(123)
ps.selectPathProjector("Progressive", 0.2)
ps.setErrorThreshold(1e-3)
ps.setMaxIterProjection(40)
ps.addPathOptimizer("SimpleTimeParameterization")
vf = ViewerFactory(ps)
vf.loadObjectModel(Object, "box")
robot.setJointBounds("box/root_joint", [-1, 1, -1, 1, 0, 2])
# Loaded as an object to get the visual tags at the right position.
# vf.loadEnvironmentModel (Table, 'table')
vf.loadObjectModel(Table, "table")
return robot, ps, vf
def makeGraph(robot):
from hpp.corbaserver.manipulation.constraint_graph_factory import (
ConstraintGraphFactory,
)
graph = ConstraintGraph(robot, "graph")
factory = ConstraintGraphFactory(graph)
factory.setGrippers(["talos/left_gripper"])
factory.setObjects(["box"], [Object.handles], [Object.contacts])
factory.environmentContacts(Table.contacts)
factory.setRules(
[
Rule(["talos/left_gripper"], [Object.handles[1]], False),
# Rule([ "talos/left_gripper", ], [ Object.handles[0], ], True),
Rule(["talos/left_gripper"], [".*"], True),
# Rule([ "talos/right_gripper", ], [ Object.handles[1], ], True),
]
)
factory.generate()
return graph
```
#### File: talos/pickup_object/estimation.py
```python
import numpy as np
from hpp import Transform
from hpp.corbaserver.manipulation import ConstraintGraph, ProblemSolver, Rule
from hpp.corbaserver.manipulation.robot import CorbaClient, Robot
from hpp.gepetto.manipulation import ViewerFactory
import CORBA
clients = CorbaClient(postContextId="_estimation")
clients.manipulation.problem.resetProblem()
Robot.packageName = "talos_data"
Robot.urdfName = "talos"
Robot.urdfSuffix = "_full_v2"
Robot.srdfSuffix = ""
class Box(object):
rootJointType = "freeflyer"
packageName = "hpp_tutorial"
meshPackageName = "hpp_tutorial"
urdfName = "cup"
urdfSuffix = ""
srdfSuffix = ""
handles = ["box/top", "box/bottom"]
contacts = ["box/box_surface"]
class Brick(object):
rootJointType = "freeflyer"
packageName = "gerard_bauzil"
urdfName = "cobblestone"
urdfSuffix = ""
srdfSuffix = ""
handles = ["box/handle1", "box/handle2"]
poses = ["box/pose1", "box/pose2"]
class Table(object):
packageName = "gerard_bauzil"
urdfName = "pedestal_table"
urdfSuffix = ""
srdfSuffix = ""
pose = "pose"
contacts = ["table/support"]
# Object = Box
Object = Brick
half_sitting = [
# -0.74,0,1.0192720229567027,0,0,0,1, # root_joint
-0.6,
-0.2,
1.0192720229567027,
0,
0,
0,
1, # root_joint
0.0,
0.0,
-0.411354,
0.859395,
-0.448041,
-0.001708, # leg_left
0.0,
0.0,
-0.411354,
0.859395,
-0.448041,
-0.001708, # leg_right
0,
0.006761, # torso
0.25847,
0.173046,
-0.0002,
-0.525366,
0,
0,
0.1, # arm_left
0,
0,
0,
0,
0,
0,
0, # gripper_left
-0.25847,
-0.173046,
0.0002,
-0.525366,
0,
0,
0.1, # arm_right
0,
0,
0,
0,
0,
0,
0, # gripper_right
0,
0, # head
0,
0,
0,
0,
0,
0,
1, # box
]
def makeRobotProblemAndViewerFactory(corbaclient):
robot = Robot("dev", "talos", rootJointType="freeflyer", client=corbaclient)
robot.leftAnkle = "talos/leg_left_6_joint"
robot.rightAnkle = "talos/leg_right_6_joint"
robot.setJointBounds("talos/root_joint", [-1, 1, -1, 1, 0.5, 1.5])
ps = ProblemSolver(robot)
ps.setRandomSeed(123)
ps.selectPathProjector("Progressive", 0.2)
ps.setErrorThreshold(1e-3)
ps.setMaxIterProjection(40)
ps.addPathOptimizer("SimpleTimeParameterization")
vf = ViewerFactory(ps)
vf.loadObjectModel(Object, "box")
robot.setJointBounds("box/root_joint", [-1, 1, -1, 1, 0, 2])
vf.loadEnvironmentModel(Table, "table")
return robot, ps, vf
def makeGraph(robot):
graph = ConstraintGraph.buildGenericGraph(
robot,
"graph",
# [ "talos/left_gripper", "talos/right_gripper", "table/pose", ],
["talos/left_gripper", "table/pose"],
["box"],
[Object.handles + Object.poses],
# [ Object.contacts, ],
[[]],
Table.contacts,
[
Rule(["table/pose"], ["box/handle[12]"], False),
Rule(["talos/left_gripper"], ["box/pose[12]"], False),
Rule(["table/pose"], ["box/pose1"], False),
Rule(["talos/left_gripper"], [Object.handles[1]], False),
Rule(["talos/left_gripper"], [Object.handles[0]], True),
# Rule([ "talos/right_gripper", ], [ Object.handles[1], ], True),
Rule(["table/pose"], ["box/pose2"], True),
],
)
return graph
robot, ps, vf = makeRobotProblemAndViewerFactory(clients)
q_init = robot.getCurrentConfig()
ps.addPartialCom("talos", ["talos/root_joint"])
ps.addPartialCom("talos_box", ["talos/root_joint", "box/root_joint"])
ps.createStaticStabilityConstraints(
"balance", half_sitting, "talos", ProblemSolver.FIXED_ON_THE_GROUND
)
foot_placement = ["balance/pose-left-foot", "balance/pose-right-foot"]
foot_placement_complement = []
robot.setCurrentConfig(half_sitting)
com_wf = np.array(ps.getPartialCom("talos"))
tf_la = Transform(robot.getJointPosition(robot.leftAnkle))
com_la = tf_la.inverse().transform(com_wf)
ps.createRelativeComConstraint(
"com_talos_box", "talos_box", robot.leftAnkle, com_la.tolist(), (True, True, True)
)
ps.createRelativeComConstraint(
"com_talos", "talos", robot.leftAnkle, com_la.tolist(), (True, True, True)
)
ps.createPositionConstraint(
"gaze",
"talos/rgbd_optical_joint",
"box/root_joint",
(0, 0, 0),
(0, 0, 0),
(True, True, False),
)
left_gripper_lock = []
right_gripper_lock = []
other_lock = ["talos/torso_1_joint"]
for n in robot.jointNames:
s = robot.getJointConfigSize(n)
r = robot.rankInConfiguration[n]
if n.startswith("talos/gripper_right"):
ps.createLockedJoint(n, n, half_sitting[r : r + s])
right_gripper_lock.append(n)
elif n.startswith("talos/gripper_left"):
ps.createLockedJoint(n, n, half_sitting[r : r + s])
left_gripper_lock.append(n)
elif n in other_lock:
ps.createLockedJoint(n, n, half_sitting[r : r + s])
graph = makeGraph(robot)
graph.setConstraints(
graph=True,
lockDof=left_gripper_lock + right_gripper_lock + other_lock,
numConstraints=["com_talos_box", "gaze"] + foot_placement,
)
graph.initialize()
res, q_init, err = graph.applyNodeConstraints(
"table/pose grasps box/pose2", half_sitting
)
# res, q_goal, err = graph.applyNodeConstraints("talos/right_gripper grasps box/bottom", half_sitting)
# print(ps.directPath(q_init, q_init, True))
ps.setInitialConfig(q_init)
# ps.addGoalConfig(q_goal)
ps.setTargetState(graph.nodes["talos/left_gripper grasps box/handle1"])
ps.setParameter("SimpleTimeParameterization/safety", 0.5)
ps.setParameter("SimpleTimeParameterization/order", 2)
ps.setParameter("SimpleTimeParameterization/maxAcceleration", 2.0)
ps.setParameter("ConfigurationShooter/Gaussian/standardDeviation", 0.05)
ps.client.basic.problem.selectConfigurationShooter("Gaussian")
# ps.setRandomSeed(1)
print(ps.solve())
ps.client.manipulation.problem.selectProblem("estimation")
clients.manipulation.problem.resetProblem()
robotEst, psEst, vfEst = makeRobotProblemAndViewerFactory(clients)
graphEst = makeGraph(robotEst)
graphEst.initialize()
psEst.setParameter("SimpleTimeParameterization/safety", CORBA.Any(CORBA.TC_double, 0.5))
psEst.setParameter("SimpleTimeParameterization/order", 2)
```
#### File: common/navigation_map/calibration_base_to_mocap_frame.py
```python
import csv, numpy as np
def read(filename):
with open(filename, 'r') as f:
csv_reader = csv.reader(f, delimiter='\t')
# headers
for i in range(12):
csv_reader.next()
data = np.array([ [ float(x) for x in row[2:-1] ] for row in csv_reader ])
return data
import matplotlib.pyplot as plt
d0 = read("tiago_calibration_0_6D.tsv")
d1 = read("tiago_calibration_1_6D.tsv")
d2 = read("tiago_calibration_2_6D.tsv")
def plot(d):
ax=plt.subplot(3,1,1)
ax.plot(d[:,0], d[:,1])
ax=plt.subplot(3,1,2)
ax.plot(d[:,2])
ax.plot(d[:,3])
ax.plot(d[:,4])
ax=plt.subplot(3,1,3)
ax.plot(d[:,5])
plt.show()
def plotTrajectory(d, xy=True, z=False, angles=False):
nrows = xy+z+angles
irows = 1
if xy:
ax = plt.subplot(nrows,1,1)
ax.plot([ e.translation[0] for e in d ], [ e.translation[1] for e in d ])
ax.set_aspect('equal', adjustable='box')
irows = 2
if z:
plt.subplot(nrows,1,irows)
plt.plot([ e.translation[2] for e in d ], label="tz")
irows += 1
if angles:
plt.subplot(nrows,1,irows)
plt.plot([ pinocchio.log3(e.rotation) for e in d ])
plt.legend(["rx", "ry", "rz"])
plt.show()
# m = MoCap
# b = base
# w = world
#
# Odometry:
# wMb(i+1) = wMb(i) * exp(v*dt)
# wMm(i+1) * mMb = wMm(i) * mMb * exp(v*dt)
# bMm * wMm(i+1).inverse() * wMm(i) * mMb * exp(v*dt) = Id
#
# Balanced odometry drift
#
# Global positioning
# wMm(i) = wMb(0) * exp(v * i * dt) * bMm
# Worse because of the cumulative odometry drift.
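# A quick self-check of the relative identity above (editor's sketch, all
# commented out): with a ground-truth bMm and noise-free motion,
# dMocap = bMm^-1 * dOdom^-1 * bMm, so bMm * dMocap * mMb * dOdom == Id and
# error() below returns (numerically) zero for that bMm:
#
#   bMm_true = pinocchio.SE3.Random()
#   dO = pinocchio.exp6(np.array([0.1, 0., 0., 0., 0., 0.2]) * 0.01)
#   dM = bMm_true.inverse() * dO.inverse() * bMm_true
#   residual = bMm_true * dM * bMm_true.inverse() * dO   # == SE3.Identity()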
import pinocchio
def toSE3(e):
if e[0] == 0.0: return None
return pinocchio.SE3(
#e[7:16].reshape((3,3)),
pinocchio.utils.rpyToMatrix(np.deg2rad(e[3:6])),
e[:3]/1000)
Id = pinocchio.SE3.Identity()
d0se3 = [ toSE3(e) for e in d0 ]
d1se3 = [ toSE3(e) for e in d1 ]
d2se3 = [ toSE3(e) for e in d2 ]
v0 = np.array([0. , 0., 0., 0., 0., 0.3])
v1 = np.array([0.1, 0., 0., 0., 0., -0.3])
v2 = np.array([0.1, 0., 0., 0., 0., 0.3])
dt = 0.01
xReg = 1.
K = 5
dMocap = []
dOdom = []
for wMm, v in [
(d0se3, v0),
(d1se3, v1),
(d2se3, v2),
]:
for i in range(len(wMm)-K):
if wMm[i+K] is None or wMm[i] is None: continue
dMocap.append(wMm[i+K].inverse() * wMm[i])
dOdom.append(pinocchio.exp6(K*v*dt))
subsample = 10
if subsample > 1:
dMocap = dMocap[::subsample]
dOdom = dOdom[::subsample]
def error(dMocap, dOdom, bMm):
# x regularisation
if xReg > 0:
errs = (xReg * pinocchio.log6(bMm).vector).tolist()[2:5]
else:
errs = []
assert len(dMocap) == len(dOdom)
mMb = bMm.inverse()
for dM, dO in zip(dMocap, dOdom):
P = bMm * dM * mMb * dO
errs.extend(pinocchio.log6(P).vector.tolist())
return np.array(errs)
def jacobian(dMocap, dOdom, bMm):
if xReg > 0:
J = (xReg * pinocchio.Jlog6(bMm))[2:5,:].tolist()
else:
J = []
mMb = bMm.inverse()
for dM, dO in zip(dMocap, dOdom):
P = bMm * dM * mMb * dO
JP = (dM * mMb * dO).toActionMatrixInverse() - (mMb*dO).toActionMatrixInverse()
JlogP = np.dot(pinocchio.Jlog6(P), JP)
J.extend(JlogP)
return np.array(J)
def jacobian_fd(dMocap, dOdom, bMm, eps=0.001):
dM = np.zeros((6))
err = error(dMocap, dOdom, bMm)
J = np.zeros( (err.shape[0], 6) )
for i in range(6):
dM[i] = eps
bMm2 = bMm * pinocchio.exp6(dM)
err2 = error(dMocap, dOdom, bMm2)
J[:,i] = (err2-err)/eps
dM[i] = 0.
return J
def optimize(dMocap, dOdom,
bMm = pinocchio.SE3.Identity(),
iter=100,
ethr=0.001,
Jthr=0.001,
mthr=1e-4,
fd=False,
):
def norm2(a): return np.sum(a**2)
from numpy.linalg import norm
err2 = None
while iter > 0:
err = error(dMocap, dOdom, bMm) if err2 is None else err2
els = norm2(err) if err2 is None else els2
if norm(err) < ethr:
print("Error is very small")
break
if fd:
J = jacobian_fd(dMocap, dOdom, bMm)
else:
J = jacobian(dMocap, dOdom, bMm)
if norm(J) < Jthr:
print("Jacobian is very small")
break
d, res, rank, s = np.linalg.lstsq(J, -err, rcond=None)
# do line search on els = norm2(err), Jls = 2 * err^T * J
# els(u) = norm2(err(q + u*d)) ~ els(0) + u * Jls * d
Jls = 2 * np.dot(err,J)
m = np.dot(Jls, d)
if abs(m) < mthr:
print("m is very small.", m)
break
assert m < 0, str(m) + " should be negative"
alpha = 1.
c = 0.1 # factor for the linear part
rate = 0.5
alphaDefault = None
while True:
bMm2 = bMm * pinocchio.exp6(alpha*d)
err2 = error(dMocap, dOdom, bMm2)
els2 = norm2(err2)
if els2 < els + c * alpha * m:
break
if alphaDefault is None and els2 < els:
alphaDefault = alpha
alpha *= rate
if alpha < 1e-5:
if alphaDefault is None:
print("failed to find a alpha that makes the error decrease. m =", m)
return bMm
print("failed to find correct alpha")
alpha = alphaDefault
bMm2 = bMm * pinocchio.exp6(alpha*d)
err2 = error(dMocap, dOdom, bMm2)
els2 = norm2(err2)
break
if iter%10 == 0:
print("{:4} {:^8} {:^8} {:^8} {:^8}".format("iter", "err", "J", "d","alpha"))
print("{:4} {:8.5} {:8.5} {:8.5} {:8.5}".format(iter, np.sqrt(els2), norm(J), norm(d), alpha))
#bMm = bMm * pinocchio.exp6(d)
bMm = bMm2
iter -= 1
return bMm
def plotError(dMocap, dOdom, bMm):
err = error(dMocap, dOdom, bMm)
plt.subplot(2,1,1)
plt.plot(err[0::6], label="tx")
plt.plot(err[1::6], label="ty")
plt.plot(err[2::6], label="tz")
plt.legend()
plt.subplot(2,1,2)
plt.plot(err[3::6], label="rx")
plt.plot(err[4::6], label="ry")
plt.plot(err[5::6], label="rz")
plt.legend()
plt.show()
def plotOdom(mMocap, bMm):
mMb = bMm.inverse()
bMocap = [ None if mM is None else mM * mMb for mM in mMocap ]
deltas = []
K = 10
for i in range(len(bMocap)-K):
if bMocap[i+K] is None or bMocap[i] is None: continue
deltas.append(pinocchio.log6(bMocap[i].inverse() * bMocap[i+K]).vector / (K*dt))
deltas = np.array(deltas)
plt.subplot(2,1,1)
plt.plot(deltas[:, 0], label="vx")
plt.plot(deltas[:, 1], label="vy")
plt.plot(deltas[:, 2], label="vz")
plt.legend()
plt.subplot(2,1,2)
plt.plot(deltas[:, 3], label="wx")
plt.plot(deltas[:, 4], label="wy")
plt.plot(deltas[:, 5], label="wz")
plt.legend()
plt.show()
bMm = optimize(dMocap, dOdom, Id, mthr=1e-8)
print(bMm)
if False:
kwargs={ 'xy': True,
'z': True,
'angles': False,
}
plt.ion()
plt.figure()
plt.title('Traj 0')
plotTrajectory(d0se3, **kwargs)
plotTrajectory([ e*bMm.inverse() for e in d0se3], **kwargs)
plt.legend(['mocap', 'calibrated'])
plt.figure()
plt.title('Traj 1')
plotTrajectory(d1se3, **kwargs)
plotTrajectory([ e*bMm.inverse() for e in d1se3], **kwargs)
plt.legend(['mocap', 'calibrated'])
plt.figure()
plt.title('Traj 2')
plotTrajectory([ e for e in d2se3 if e is not None], **kwargs)
plotTrajectory([ e*bMm.inverse() for e in d2se3 if e is not None], **kwargs)
plt.legend(['mocap', 'calibrated'])
```
#### File: tiago/deburring/generate_obstacle_model.py
```python
import hppfcl, random, numpy as np, eigenpy
# Algo to generate points
# select a face (i.e a flat convex polyhedron)
# select a point on this face
# compute the translation and rotation
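# The barycentric weights used below (ws / sum(ws)) are simple but bias the
# samples toward the triangle center. A uniform alternative, should it ever be
# needed (editor's sketch, the standard square-root trick):
#
#   def uniform_point_on_triangle(p0, p1, p2):
#       r1, r2 = random.random(), random.random()
#       s1 = r1 ** 0.5
#       return (1 - s1) * p0 + s1 * (1 - r2) * p1 + s1 * r2 * p2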
def generate_srdf(bvh_file, N, output):
loader = hppfcl.MeshLoader ()
bvh = loader.load(bvh_file)
handle = """
<handle name="{name}" clearance="{clearance}">
<position xyz="{xyz}" xyzw="{xyzw}"/>
<link name="{link}" />
<mask>{mask}</mask>
</handle>"""
mask = "1 1 1 1 1 1"
clearance = 0.01
link = "cylinder"
output.write("""<robot name="cylinder">""")
for ih in range(N):
it = random.randint(0,bvh.num_tris-1)
tri = bvh.tri_indices(it)
ws = [ random.random() for _ in range(3) ]
wt = sum(ws)
ps = [ bvh.vertices(i) for i in tri ]
p = sum((wi/wt*pi for wi,pi in zip(ws,ps)))
x = -np.cross(ps[1]-ps[0], ps[2]-ps[0])
x /= np.linalg.norm(x)
p -= 0.05*x
quat = eigenpy.Quaternion.FromTwoVectors(np.array([1,0,0]), x)
output.write(handle.format(
name="handle_"+str(ih),
clearance=clearance,
xyz = " ".join([ str(v) for v in p ]),
xyzw = " ".join([ str(quat[i]) for i in range(4) ]),
link = link,
mask=mask))
output.write("""</robot>""")
if __name__ == "__main__":
bvh_file = "/home/jmirabel/devel/hpp/src/agimus-demos/meshes/cylinder.stl"
N = 10
import sys
generate_srdf(bvh_file, N, sys.stdout)
```
#### File: tiago/insert_drill/script_hpp.py
```python
from hpp.corbaserver.manipulation import Robot, loadServerPlugin, createContext, newProblem, ProblemSolver, ConstraintGraph, Rule, Constraints, CorbaClient
from hpp.gepetto.manipulation import ViewerFactory
import sys, argparse
# parse arguments
defaultContext = "corbaserver"
p = argparse.ArgumentParser (description=
'Initialize demo of Tiago inserting a drill')
p.add_argument ('--context', type=str, metavar='context',
default=defaultContext,
help="identifier of ProblemSolver instance")
p.add_argument ('--ros-param', type=str, metavar='ros_param',
help="The name of the ROS param containing the URDF.")
args = p.parse_args ()
if args.context != defaultContext:
createContext (args.context)
isSimulation = args.context == "simulation"
Robot.urdfFilename = "package://tiago_data/robots/tiago_pal_hey5.urdf"
Robot.srdfFilename = "package://tiago_data/srdf/pal_hey5_gripper.srdf"
class Driller:
urdfFilename = "package://gerard_bauzil/urdf/driller_with_qr_drill.urdf"
srdfFilename = "package://gerard_bauzil/srdf/driller.srdf"
rootJointType = "freeflyer"
class AircraftSkin:
urdfFilename = "package://agimus_demos/urdf/aircraft_skin_with_marker.urdf"
srdfFilename = "package://agimus_demos/srdf/aircraft_skin_with_marker.srdf"
rootJointType = "anchor"
## Reduce joint range for security
def shrinkJointRange (robot, ratio):
for j in robot.jointNames:
if j[:6] != "tiago/": continue
tj = j[6:]
if tj.startswith("torso") or tj.startswith("arm") or tj.startswith("head"):
bounds = robot.getJointBounds (j)
if len (bounds) == 2:
width = bounds [1] - bounds [0]
mean = .5 * (bounds [1] + bounds [0])
m = mean - .5 * ratio * width
M = mean + .5 * ratio * width
robot.setJointBounds (j, [m, M])
print("context=" + args.context)
loadServerPlugin (args.context, "manipulation-corba.so")
client = CorbaClient(context=args.context)
client.manipulation.problem.selectProblem (args.context)
robot = Robot("robot", "tiago", rootJointType="planar", client=client)
robot.setJointBounds('tiago/root_joint', [-2, 2, -2, 2])
#robot.insertRobotSRDFModel("tiago", "tiago_data", "schunk", "_gripper")
ps = ProblemSolver(robot)
vf = ViewerFactory(ps)
vf.loadRobotModel (Driller, "driller")
robot.insertRobotSRDFModel("driller", "gerard_bauzil", "qr_drill", "")
robot.setJointBounds('driller/root_joint', [-2, 2, -2, 2, 0, 2])
ps.selectPathValidation("Graph-Dichotomy", 0)
ps.selectPathProjector("Progressive", 0.2)
ps.addPathOptimizer("EnforceTransitionSemantic")
ps.addPathOptimizer("SimpleTimeParameterization")
if isSimulation:
ps.setMaxIterProjection (1)
ps.setParameter("SimpleTimeParameterization/safety", 0.25)
ps.setParameter("SimpleTimeParameterization/order", 2)
ps.setParameter("SimpleTimeParameterization/maxAcceleration", 1.0)
ps.setParameter("ManipulationPlanner/extendStep", 0.7)
#from hpp import Quaternion
#oMsk = (0.10576, -0.0168, 1.6835) + Quaternion().fromRPY(1.8, 0, 0).toTuple()
#oMsk = (0.30576, -0.0138, 1.5835) + Quaternion().fromRPY(1.8, 0, 0).toTuple()
#vf.loadObstacleModel(skinTagUrdf, "skin")
#vf.moveObstacle("skin", oMsk)
vf.loadObjectModel (AircraftSkin, "skin")
#vf.loadRobotModelFromString ("skin", AircraftSkin.rootJointType, AircraftSkin.urdfString, AircraftSkin.srdfString)
#robot.setRootJointPosition("skin", oMsk)
#robot.setJointPosition("skin/root_joint", oMsk)
shrinkJointRange(robot, 0.95)
q0 = robot.getCurrentConfig()
q0[:4] = [0, -0.9, 0, 1]
q0[robot.rankInConfiguration['tiago/torso_lift_joint']] = 0.15
q0[robot.rankInConfiguration['tiago/arm_1_joint']] = 0.10
q0[robot.rankInConfiguration['tiago/arm_2_joint']] = -1.47
q0[robot.rankInConfiguration['tiago/arm_3_joint']] = -0.16
q0[robot.rankInConfiguration['tiago/arm_4_joint']] = 1.87
q0[robot.rankInConfiguration['tiago/arm_5_joint']] = -1.57
q0[robot.rankInConfiguration['tiago/arm_6_joint']] = 0.01
q0[robot.rankInConfiguration['tiago/arm_7_joint']] = 0.00
q0[robot.rankInConfiguration['tiago/hand_thumb_abd_joint']] = 1.5707
q0[robot.rankInConfiguration['tiago/hand_index_abd_joint']] = 0.35
q0[robot.rankInConfiguration['tiago/hand_middle_abd_joint']] = -0.1
q0[robot.rankInConfiguration['tiago/hand_ring_abd_joint']] = -0.2
q0[robot.rankInConfiguration['tiago/hand_little_abd_joint']] = -0.35
def lockJoint(jname, q, cname=None):
if cname is None:
cname = jname
s = robot.rankInConfiguration[jname]
e = s+robot.getJointConfigSize(jname)
ps.createLockedJoint(cname, jname, q[s:e])
ps.setConstantRightHandSide(cname, True)
return cname
ljs = list()
ljs.append(lockJoint("tiago/root_joint", q0))
for n in robot.jointNames:
if n.startswith('tiago/hand_'):
ljs.append(lockJoint(n, q0))
ps.createPositionConstraint("gaze", "tiago/xtion_rgb_optical_frame", "driller/tag_joint",
(0,0,0), (0,0,0), (True,True,False))
from hpp.corbaserver.manipulation import ConstraintGraphFactory
graph = ConstraintGraph(robot, 'graph')
factory = ConstraintGraphFactory(graph)
factory.setGrippers([ "tiago/gripper", "driller/drill_tip", ])
factory.setObjects([ "driller", "skin", ],
[ [ "driller/handle", ], [ "skin/hole", ], ],
[ [ ], [ ], ])
factory.setRules([
# Tiago always hold the gripper.
Rule([ "tiago/gripper", ], [ "driller/handle", ], True), Rule([ "tiago/gripper", ], [ ".*", ], False),
# Only allow associating drill_tip with skin/hole.
Rule([ "driller/drill_tip", ], [ "driller/handle", ], False), Rule([ "driller/drill_tip", ], [ ".*", ], True), ])
factory.generate()
graph.addConstraints(graph=True, constraints=Constraints(numConstraints=ljs))
for n in [ 'driller/drill_tip > skin/hole | 0-0_pregrasp', 'tiago/gripper grasps driller/handle : driller/drill_tip grasps skin/hole' ]:
graph.addConstraints(node=n, constraints=Constraints(numConstraints=["gaze"]))
graph.initialize()
# Constraints in this state are explicit, so ps.setMaxIterProjection(1) should not
# make it fail.
res, q1, err = graph.applyNodeConstraints('tiago/gripper grasps driller/handle', q0)
q1valid, msg = robot.isConfigValid(q1)
if not q1valid:
print(msg)
assert res
ps.setInitialConfig(q1)
if not isSimulation:
qrand = q1
for i in range(100):
q2valid, q2, err = graph.generateTargetConfig('driller/drill_tip > skin/hole | 0-0', q1, qrand)
if q2valid:
q2valid, msg = robot.isConfigValid(q2)
if q2valid:
break
qrand = robot.shootRandomConfig()
assert q2valid
if not isSimulation:
ps.addGoalConfig(q2)
ps.solve()
try:
v = vf.createViewer()
v (q1)
except Exception:
pass
```
{
"source": "jmirabel/sot-dynamic-pinocchio",
"score": 3
}
#### File: sot/dynamic_pinocchio/humanoid_robot.py
```python
from __future__ import print_function
import sys
import pinocchio
from dynamic_graph import plug
from dynamic_graph.sot.core.derivator import Derivator_of_Vector
from dynamic_graph.sot.core.op_point_modifier import OpPointModifier
from dynamic_graph.sot.core.robot_simu import RobotSimu
from dynamic_graph.tools import addTrace
from dynamic_graph.tracer_real_time import TracerRealTime
if sys.version_info.major == 2:
from abc import ABCMeta, abstractmethod
class ABC:
__metaclass__ = ABCMeta
else:
from abc import ABC, abstractmethod
class AbstractRobot(ABC):
"""
This class instantiates all the entities required to get a consistent
representation of a robot, mainly:
- device : to integrate velocities into angular control,
- dynamic: to compute forward geometry and kinematics,
- zmpFromForces: to compute the ZMP from the foot force sensors,
- stabilizer: to stabilize balanced motions
Operational points are stored into 'OperationalPoints' list. Some of them
are also accessible directly as attributes:
- leftWrist,
- rightWrist,
- leftAnkle,
- rightAnkle,
- Gaze.
Operational points are mapped to the actual joints in the robot model
via the 'OperationalPointsMap' dictionary.
This attribute *must* be defined in the subclasses.
Other attributes that must also be defined:
- halfSitting: the half-sitting position, used as the robot initial pose.
This attribute *must* be defined in subclasses.
- dynamic: The robot dynamic model.
- device: The device that integrates the dynamic equation, namely
the real robot or a simulator.
- dimension: The configuration size.
"""
def _initialize(self):
self.OperationalPoints = []
"""
Operational points are specific interesting points of the robot
used to control it.
When an operational point is defined, signals corresponding to the
point position and jacobian are created.
For instance if creating an operational point for the left-wrist,
the associated signals will be called "left-wrist" and
"Jleft-wrist" for respectively the position and the jacobian.
"""
self.AdditionalFrames = []
"""
Additional frames are frames defined w.r.t. an operational point
which provide a useful transformation.
They can be used, for instance, to store a sensor location.
The contained elements must be triplets matching:
- additional frame name,
- transformation w.r.t. the operational point,
- operational point name.
"""
self.frames = dict()
"""
Additional frames defined by using OpPointModifier.
"""
# FIXME: the following options are /not/ independent.
# zmp requires acceleration which requires velocity.
"""
Enable velocity computation.
"""
self.enableVelocityDerivator = False
"""
Enable acceleration computation.
"""
self.enableAccelerationDerivator = False
"""
Enable ZMP computation
"""
self.enableZmpComputation = False
"""
Tracer used to log data.
"""
self.tracer = None
"""
How much data will be logged.
"""
self.tracerSize = 2**20
"""
Automatically recomputed signals through the use
of device.after.
This list is maintained in order to clean the
signal list device.after before exiting.
"""
self.autoRecomputedSignals = []
"""
Which signals should be traced.
"""
self.tracedSignals = {
'dynamic': ["com", "zmp", "angularmomentum", "position", "velocity", "acceleration"],
'device': ['zmp', 'control', 'state']
}
def help(self):
print(type(self).__doc__)
def _removeMimicJoints(self, urdfFile=None, urdfString=None):
""" Parse the URDF, extract the mimic joints and call removeJoints. """
# get mimic joints
import xml.etree.ElementTree as ET
if urdfFile is not None:
assert urdfString is None, "Exactly one of urdfFile and urdfString must be provided"
root = ET.parse(urdfFile)
else:
assert urdfString is not None, "Exactly one of urdfFile and urdfString must be provided"
root = ET.fromstring(urdfString)
mimicJoints = list()
for e in root.iter('joint'):
if 'name' in e.attrib:
name = e.attrib['name']
for c in e:
if hasattr(c, 'tag') and c.tag == 'mimic':
mimicJoints.append(name)
self.removeJoints(mimicJoints)
def removeJoints(self, joints):
"""
- param joints: a list of joint names to be removed from the self.pinocchioModel
"""
jointIds = list()
for j in joints:
if self.pinocchioModel.existJointName(j):
jointIds.append(self.pinocchioModel.getJointId(j))
if len(jointIds) > 0:
q = pinocchio.neutral(self.pinocchioModel)
self.pinocchioModel = pinocchio.buildReducedModel(self.pinocchioModel, jointIds, q)
self.pinocchioData = pinocchio.Data(self.pinocchioModel)
def loadModelFromString(self, urdfString, rootJointType=pinocchio.JointModelFreeFlyer, removeMimicJoints=True):
""" Load a URDF model contained in a string
- param rootJointType: the root joint type. None for no root joint.
- param removeMimicJoints: if True, all the mimic joints found in the model are removed.
"""
if rootJointType is None:
self.pinocchioModel = pinocchio.buildModelFromXML(urdfString)
else:
self.pinocchioModel = pinocchio.buildModelFromXML(urdfString, rootJointType())
self.pinocchioData = pinocchio.Data(self.pinocchioModel)
if removeMimicJoints:
self._removeMimicJoints(urdfString=urdfString)
def loadModelFromUrdf(self,
urdfPath,
urdfDir=None,
rootJointType=pinocchio.JointModelFreeFlyer,
removeMimicJoints=True):
"""
Load a model using the pinocchio urdf parser. This parser looks
for urdf files in which kinematics and dynamics information
have been added.
- param urdfPath: a path to the URDF file.
- param urdfDir: A list of directories. If None, will use ROS_PACKAGE_PATH.
"""
if urdfPath.startswith("package://"):
from os import path
n1 = 10 # len("package://")
n2 = urdfPath.index(path.sep, n1)
pkg = urdfPath[n1:n2]
relpath = urdfPath[n2 + 1:]
import rospkg
rospack = rospkg.RosPack()
abspkg = rospack.get_path(pkg)
urdfFile = path.join(abspkg, relpath)
else:
urdfFile = urdfPath
if urdfDir is None:
import os
urdfDir = os.environ["ROS_PACKAGE_PATH"].split(':')
if rootJointType is None:
self.pinocchioModel = pinocchio.buildModelFromUrdf(urdfFile)
else:
self.pinocchioModel = pinocchio.buildModelFromUrdf(urdfFile, rootJointType())
self.pinocchioData = pinocchio.Data(self.pinocchioModel)
if removeMimicJoints:
self._removeMimicJoints(urdfFile=urdfFile)
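# Usage sketch (hypothetical path; the "package://" form assumes a ROS
# environment so that rospkg and ROS_PACKAGE_PATH are available):
#   robot.loadModelFromUrdf("package://talos_data/robots/talos_reduced.urdf")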
def initializeOpPoints(self):
for op in self.OperationalPoints:
self.dynamic.createOpPoint(op, self.OperationalPointsMap[op])
def createFrame(self, frameName, transformation, operationalPoint):
frame = OpPointModifier(frameName)
frame.setTransformation(transformation)
plug(self.dynamic.signal(operationalPoint), frame.positionIN)
plug(self.dynamic.signal("J{0}".format(operationalPoint)), frame.jacobianIN)
frame.position.recompute(frame.position.time + 1)
frame.jacobian.recompute(frame.jacobian.time + 1)
return frame
def setJointValueInConfig(self, q, jointNames, jointValues):
"""
q: configuration to update
jointNames: list of existing joint names in self.pinocchioModel
jointValues: corresponding joint values.
"""
model = self.pinocchioModel
for jn, jv in zip(jointNames, jointValues):
assert model.existJointName(jn)
joint = model.joints[model.getJointId(jn)]
q[joint.idx_q] = jv
@abstractmethod
def defineHalfSitting(self, q):
"""
Define half sitting configuration using the pinocchio Model (i.e.
with quaternions and not with euler angles).
The method setJointValueInConfig may be useful to implement this function.
"""
pass
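# Minimal implementation sketch for a subclass (joint names are
# assumptions, not part of this file):
#   def defineHalfSitting(self, q):
#       self.setJointValueInConfig(q, ["arm_left_4_joint"], [1.2])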
def initializeRobot(self):
"""
If the robot model is correctly loaded, this method will then
initialize the operational points, set the position to
half-sitting with null velocity/acceleration.
To finish, different tasks are initialized:
- the center of mass task used to keep the robot stability
- one task per operational point to ease robot control
"""
if not hasattr(self, 'dynamic'):
raise RuntimeError("Dynamic robot model must be initialized first")
if not hasattr(self, 'device') or self.device is None:
# No device was provided: fall back to a simulated device.
self.device = RobotSimu(self.name + '_device')
self.device.resize(self.dynamic.getDimension())
"""
Robot timestep
"""
self.timeStep = self.device.getTimeStep()
# Compute half sitting configuration
import numpy
"""
Half sitting configuration.
"""
self.halfSitting = numpy.asarray(pinocchio.neutral(self.pinocchioModel)).flatten().tolist()
self.defineHalfSitting(self.halfSitting)
self.halfSitting[3:7] = [0., 0., 0.]  # Replace the quaternion (4 values) by RPY (3 values).
# Set the device limits.
def get(s):
s.recompute(0)
return s.value
def opposite(v):
return [-x for x in v]
self.device.setPositionBounds(get(self.dynamic.lowerJl), get(self.dynamic.upperJl))
self.device.setVelocityBounds(opposite(get(self.dynamic.upperVl)), get(self.dynamic.upperVl))
self.device.setTorqueBounds(opposite(get(self.dynamic.upperTl)), get(self.dynamic.upperTl))
# Freeflyer reference frame should be the same as global
# frame so that operational point positions correspond to
# position in freeflyer frame.
self.device.set(self.halfSitting)
plug(self.device.state, self.dynamic.position)
if self.enableVelocityDerivator:
self.velocityDerivator = Derivator_of_Vector('velocityDerivator')
self.velocityDerivator.dt.value = self.timeStep
plug(self.device.state, self.velocityDerivator.sin)
plug(self.velocityDerivator.sout, self.dynamic.velocity)
else:
self.dynamic.velocity.value = self.dimension * (0., )
if self.enableAccelerationDerivator:
self.accelerationDerivator = \
Derivator_of_Vector('accelerationDerivator')
self.accelerationDerivator.dt.value = self.timeStep
plug(self.velocityDerivator.sout, self.accelerationDerivator.sin)
plug(self.accelerationDerivator.sout, self.dynamic.acceleration)
else:
self.dynamic.acceleration.value = self.dimension * (0., )
def addTrace(self, entityName, signalName):
if self.tracer:
self.autoRecomputedSignals.append('{0}.{1}'.format(entityName, signalName))
addTrace(self, self.tracer, entityName, signalName)
def initializeTracer(self):
if not self.tracer:
self.tracer = TracerRealTime('trace')
self.tracer.setBufferSize(self.tracerSize)
self.tracer.open('/tmp/', 'dg_', '.dat')
# Recompute trace.triger at each iteration to enable tracing.
self.device.after.addSignal('{0}.triger'.format(self.tracer.name))
def traceDefaultSignals(self):
# Geometry / operational points
for s in self.OperationalPoints + self.tracedSignals['dynamic']:
self.addTrace(self.dynamic.name, s)
# Geometry / frames
for (frameName, _, _) in self.AdditionalFrames:
for s in ['position', 'jacobian']:
self.addTrace(self.frames[frameName].name, s)
# Device
for s in self.tracedSignals['device']:
self.addTrace(self.device.name, s)
if type(self.device) != RobotSimu:
self.addTrace(self.device.name, 'robotState')
# Misc
if self.enableVelocityDerivator:
self.addTrace(self.velocityDerivator.name, 'sout')
if self.enableAccelerationDerivator:
self.addTrace(self.accelerationDerivator.name, 'sout')
def __init__(self, name, tracer=None):
self._initialize()
self.name = name
# Initialize tracer if necessary.
if tracer:
self.tracer = tracer
def __del__(self):
if self.tracer:
self.stopTracer()
def startTracer(self):
"""
Start the tracer if it has not already been stopped.
"""
if self.tracer:
self.tracer.start()
def stopTracer(self):
"""
Stop and destroy tracer.
"""
if self.tracer:
self.tracer.dump()
self.tracer.stop()
self.tracer.close()
self.tracer.clear()
for s in self.autoRecomputedSignals:
self.device.after.rmSignal(s)
self.tracer = None
def reset(self, posture=None):
"""
Restart the control from another position.
This method has not been extensively tested and
should be used carefully.
In particular, tasks should be removed from the
solver before attempting a reset.
"""
if not posture:
posture = self.halfSitting
self.device.set(posture)
self.dynamic.com.recompute(self.device.state.time + 1)
self.dynamic.Jcom.recompute(self.device.state.time + 1)
for op in self.OperationalPoints:
self.dynamic.signal(self.OperationalPointsMap[op]).recompute(self.device.state.time + 1)
self.dynamic.signal('J' + self.OperationalPointsMap[op]).recompute(self.device.state.time + 1)
class AbstractHumanoidRobot(AbstractRobot):
def __init__(self, name, tracer=None):
AbstractRobot.__init__(self, name, tracer)
def _initialize(self):
AbstractRobot._initialize(self)
self.OperationalPoints.extend(['left-wrist', 'right-wrist', 'left-ankle', 'right-ankle', 'gaze'])
```
{
"source": "jmirabel/sot-talos",
"score": 3
}
#### File: sot/pyrene/prologue.py
```python
print("Prologue Pyrene TALOS Robot")
from dynamic_graph.entity import PyEntityFactoryClass
from dynamic_graph.sot.pyrene.robot import Robot
# Create the device.
# This entity behaves exactly like RobotSimu except:
# 1. it does not provide the increment command
# 2. it forwards the robot control to the sot-abstract
# controller.
def makeRobot ():
DeviceTalos = PyEntityFactoryClass('DeviceTalos')
# Create the robot using the device.
robot = Robot(name = 'robot', device = DeviceTalos('PYRENE'))
robot.dynamic.com.recompute (0)
_com = robot.dynamic.com.value
robot.device.zmp.value = (_com[0], _com[1], 0.)
return robot
####################################
# --- IMPORTANT --- #
# #
# THIS FILE MUST NEVER BE CHANGED. #
# TO RUN YOUR EXPERIMENT, PLEASE #
# WRITE A SEPARATE PYTHON MODULE #
# AND LAUNCH IT USING dg-remote! #
####################################
```
#### File: sot/pyrene/seqplay.py
```python
import numpy as np
from math import sqrt
from dynamic_graph.sot.pyrene.robot import Robot
from dynamic_graph.sot.core import RPYToMatrix
from dynamic_graph.sot.tools.se3 import SE3, SO3, R3
robot = Robot ('seqplay')
rpy2matrix = RPYToMatrix ('rpy2matrix')
m = 56.868
g = 9.81
pos = None
zmp = None
hip = None
def convert (filename) :
"""
Convert a seqplay file in OpenHRP format to sot-tool format
"""
global pos, zmp, hip
openhrpPos = np.genfromtxt (filename + '.pos')
openhrpZmp = np.genfromtxt (filename + '.zmp')
nbConfig = len (openhrpPos)
if len (openhrpZmp) != nbConfig :
raise RuntimeError (filename + ".pos and " + filename +
".zmp have different lengths.")
try:
openhrpHip = np.genfromtxt (filename + '.hip')
except IOError:
hip = []
for i in range (len (openhrpPos)):
hip.append ((openhrpPos [i][0], 0, 0, 0,))
openhrpHip = np.array (hip)
if len (openhrpHip) != nbConfig :
raise RuntimeError (filename + ".pos and " + filename +
".hip have different lengths.")
t = 1
featurePos = []
featureLa = []
featureRa = []
featureCom = []
forceRightFoot = []
forceLeftFoot = []
fixedFoot = None
fixedLeftFoot = None
fixedRightFoot = None
for (pos, zmp, hip) in zip (openhrpPos, openhrpZmp, openhrpHip) :
translation = 3*(0.,)
config = list (translation + tuple (hip [1:]) + tuple (pos [1:31]))
robot.dynamic.position.value = tuple (config)
robot.dynamic.position.time = t
robot.com.recompute (t)
robot.leftAnkle.position.recompute (t)
robot.rightAnkle.position.recompute (t)
lf = SE3 (robot.leftAnkle.position.value) * R3 (0., 0., -0.107)
rf = SE3 (robot.rightAnkle.position.value) * R3 (0., 0., -0.107)
# find support foot
rpy2matrix.sin.value = tuple (hip [1:])
rpy2matrix.sout.recompute (t)
waist = SE3 (rpy2matrix.sout.value, translation)
zmpGlob = waist * R3 (tuple (zmp [1:]))
# fr = m * g * dot(zmpGlob - lf, rf - lf) / ||rf - lf||^2
# fl = m * g - fr
fr = m * g * ((zmpGlob - lf)*(rf - lf)/((rf - lf)*(rf - lf)))
fl = m * g - fr
if (lf - zmpGlob) * (lf - zmpGlob) < (rf - zmpGlob) * (rf - zmpGlob) :
supportFoot = lf
fixedFoot = fixedLeftFoot
else :
supportFoot = rf
fixedFoot = fixedRightFoot
t += 1
# move support foot to previous value
if fixedFoot is None:
config [2] -= supportFoot [2]
else:
config [0] += fixedFoot [0] - supportFoot [0]
config [1] += fixedFoot [1] - supportFoot [1]
config [2] += fixedFoot [2] - supportFoot [2]
robot.dynamic.position.value = tuple (config)
robot.dynamic.position.time = t
robot.com.recompute (t)
robot.leftAnkle.position.recompute (t)
robot.rightAnkle.position.recompute (t)
featurePos.append (config)
featureCom.append (robot.com.value)
featureLa.append (robot.leftAnkle.position.value)
featureRa.append (robot.rightAnkle.position.value)
forceLeftFoot.append ((0.,0.,fl,0.,0.,0.,))
forceRightFoot.append ((0.,0.,fr,0.,0.,0.,))
t += 1
fixedLeftFoot = \
SE3 (robot.leftAnkle.position.value) * R3 (0., 0., -0.107)
fixedRightFoot = \
SE3 (robot.rightAnkle.position.value) * R3 (0., 0., -0.107)
filePos = open (filename + '.posture', 'w')
fileLa = open (filename + '.la', 'w')
fileRa = open (filename + '.ra', 'w')
fileCom = open (filename + '.com', 'w')
fileFl = open (filename + '.fl', 'w')
fileFr = open (filename + '.fr', 'w')
dt = .005
for i, (pos, la, ra, com, force_lf, force_rf) in enumerate (
zip (featurePos, featureLa, featureRa, featureCom,
forceLeftFoot, forceRightFoot)):
t = i*dt
filePos.write ("{0}".format (t))
fileLa.write ("{0}".format (t))
fileRa.write ("{0}".format (t))
fileCom.write ("{0}".format (t))
fileFl.write ("{0}".format (t))
fileFr.write ("{0}".format (t))
for x in pos:
filePos.write ("\t{0}".format (x))
for row in la:
for x in row:
fileLa.write ("\t{0}".format (x))
for row in ra:
for x in row:
fileRa.write ("\t{0}".format (x))
for x in com:
fileCom.write ("\t{0}".format (x))
for x in force_lf:
fileFl.write ("\t{0}".format (x))
for x in force_rf:
fileFr.write ("\t{0}".format (x))
filePos.write ("\n")
fileLa.write ("\n")
fileRa.write ("\n")
fileCom.write ("\n")
fileFl.write ("\n")
fileFr.write ("\n")
filePos.close ()
fileLa.close ()
fileRa.close ()
fileCom.close ()
fileFl.close ()
fileFr.close ()
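# Usage sketch (hypothetical path): convert ('/tmp/walkfwd') reads
# walkfwd.pos, walkfwd.zmp (and optionally walkfwd.hip) and writes
# walkfwd.posture, .la, .ra, .com, .fl and .fr next to them.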
if __name__ == '__main__':
#convert ('/opt/grx3.0/HRP2LAAS/etc/walkfwd')
pass
```
#### File: sot-talos/tests/test_seqplay.py
```python
URDFPATH = "~/git/pyrene/talos-data"+"/robots/talos_reduced.urdf"
URDFDIR = ["~/git/pyrene/talos-data"+"/../"]
MOTION_SEQUENCE = "~/git/pyrene/pyrene-motions/grabHandrail15/stairs_15cm_handrail_grab_actuated"
DISPLAY = True
dt = 1e-3
robotName = 'TALOS'
OperationalPointsMap = {'left-wrist' : 'arm_left_7_joint',
'right-wrist' : 'arm_right_7_joint',
'left-ankle' : 'leg_left_6_joint',
'right-ankle' : 'leg_right_6_joint',
'gaze' : 'head_2_joint',
'waist' : 'root_joint',
'chest' : 'torso_2_joint'}
halfSitting = (0.0, 0.0, 1.018213, 0.00 , 0.0, 0.0, #Free flyer
0.0, 0.0, -0.411354, 0.859395, -0.448041, -0.001708, #Left Leg
0.0, 0.0, -0.411354, 0.859395, -0.448041, -0.001708, #Right Leg
0.0 , 0.006761, #Chest
0.25847 , 0.173046, -0.0002, -0.525366, 0.0, -0.0, 0.1, 0.1, #Left Arm
-0.25847 , -0.173046, 0.0002 , -0.525366, 0.0, 0.0, 0.1, 0.1, #Right Arm
0., 0. #Head
)
#-----------------------------------------------------------------------------
#---- ROBOT SPECIFICATIONS----------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#---- DYN --------------------------------------------------------------------
#-----------------------------------------------------------------------------
from pinocchio.robot_wrapper import RobotWrapper
import pinocchio as se3
from dynamic_graph.sot.dynamics_pinocchio import fromSotToPinocchio
pinocchioRobot = RobotWrapper(URDFPATH, URDFDIR, se3.JointModelFreeFlyer())
pinocchioRobot.initDisplay(loadModel=DISPLAY)
if DISPLAY:
pinocchioRobot.display(fromSotToPinocchio(halfSitting))
from dynamic_graph.sot.dynamics_pinocchio.humanoid_robot import HumanoidRobot
robot = HumanoidRobot(robotName, pinocchioRobot.model,
pinocchioRobot.data, halfSitting, OperationalPointsMap)
# ------------------------------------------------------------------------------
# ---- Kinematic Stack of Tasks (SoT) -----------------------------------------
# ------------------------------------------------------------------------------
from dynamic_graph import plug
from dynamic_graph.sot.core import SOT
sot = SOT('sot')
sot.setSize(robot.dynamic.getDimension())
plug(sot.control,robot.device.control)
# DEFINE POSTURE TASK
from dynamic_graph.sot.core import Task, FeatureGeneric, GainAdaptive
from dynamic_graph.sot.core.meta_tasks import setGain
from dynamic_graph.sot.core.matrix_util import matrixToTuple
from numpy import identity, hstack, zeros
task_name = "posture_task"
taskPosture = Task(task_name)
taskPosture.dyn = robot.dynamic
taskPosture.feature = FeatureGeneric('feature_'+task_name)
taskPosture.featureDes = FeatureGeneric('feature_des_'+task_name)
taskPosture.gain = GainAdaptive("gain_"+task_name)
robotDim = robot.dynamic.getDimension()
first_6 = zeros((robotDim-6, 6))  # zero out the 6 free-flyer columns (32x6 for TALOS)
other_dof = identity(robotDim-6)
jacobian_posture = hstack([first_6, other_dof])
taskPosture.feature.jacobianIN.value = matrixToTuple( jacobian_posture )
taskPosture.feature.setReference(taskPosture.featureDes.name)
taskPosture.add(taskPosture.feature.name)
#DEFINE SEQUENCE PLAYER
from dynamic_graph.sot.tools import SimpleSeqPlay
seqplay = SimpleSeqPlay("seq_play")
seqplay.load(MOTION_SEQUENCE)
#MAKE CONNECTIONS
from dynamic_graph.sot.core import Selec_of_vector
plug(seqplay.posture, taskPosture.featureDes.errorIN)
getPostureValue = Selec_of_vector("current_posture")
getPostureValue.selec(6,robotDim)
plug(robot.dynamic.position, getPostureValue.sin)
plug(getPostureValue.sout, taskPosture.feature.errorIN)
plug(getPostureValue.sout, seqplay.currentPosture)
setGain(taskPosture.gain,(4.9,0.9,0.01,0.9))
plug(taskPosture.gain.gain, taskPosture.controlGain)
plug(taskPosture.error, taskPosture.gain.error)
#START SEQUENCE PLAYER
seqplay.start()
taskPosture.featureDes.errorIN.recompute(0)
#PUSH TO SOLVER
sot.push(taskPosture.name)
#-------------------------------------------------------------------------------
#----- MAIN LOOP ---------------------------------------------------------------
#-------------------------------------------------------------------------------
def runner(n):
for i in range(n):
robot.device.increment(dt)
pinocchioRobot.display(fromSotToPinocchio(robot.device.state.value))
runner(3000)
```
{
"source": "JMIsham/twister2_kafka",
"score": 2
}
#### File: util/maven/workspace_parser.py
```python
import ast
class WorkspaceVisitor(ast.NodeVisitor):
def __init__(self):
self.artifacts = {}
def visit_Call(self, rule):
# 'is not' compares identity, not equality; use '!=' for strings, and
# guard against calls whose func is not a plain name.
if not isinstance(rule.func, ast.Name) or rule.func.id != 'maven_jar': return
name = None
artifact = None
for keyword in rule.keywords:
if keyword.arg == 'name':
name = keyword.value.s
if keyword.arg == 'artifact':
artifact = keyword.value.s
self.artifacts['@%s//jar:jar' % name] = artifact
def maven_artifacts():
visitor = WorkspaceVisitor()
with open('WORKSPACE', 'r') as workspace:
visitor.visit(ast.parse(workspace.read()))
return visitor.artifacts
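# Usage sketch (run from a Bazel workspace root so that the WORKSPACE
# file is in the current directory; the mapping below is illustrative):
#   artifacts = maven_artifacts()
#   # {'@guava//jar:jar': 'com.google.guava:guava:21.0', ...}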
```
{
"source": "JMishou/node-red-envirophat",
"score": 3
}
#### File: JMishou/node-red-envirophat/enviro.py
```python
from __future__ import print_function
import io
import os
import sys
import glob
import time
import errno
import ctypes
import select
import struct
import inspect
import threading
import traceback
from envirophat import light, weather, motion
files = [sys.stdin]
last_hf_time = time.time()
last_lf_time = time.time()
hf_interval = 0.5 # seconds between motion samples (2/s)
lf_interval = 1
hf_enabled = False
lf_enabled = False
scroll = None
def process_command(data):
global hf_enabled, lf_enabled, hf_interval, lf_interval
if data[0] == "M":
if data[1] == '0':
hf_enabled = False
else:
hf_enabled = True
elif data[0] == "E":
if data[1] == '0':
lf_enabled = False
else:
lf_enabled = True
elif data[0] == "I":
if data[1] == 'M':
hf_interval = float(data[2:len(data)])
elif data[1] == 'E':
lf_interval = float(data[2:len(data)])
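# Command protocol handled above (one command per stdin line):
#   "M1" / "M0"   enable / disable the high-frequency motion stream
#   "E1" / "E0"   enable / disable the low-frequency environment stream
#   "IM<secs>"    set the motion interval, e.g. "IM0.5"
#   "IE<secs>"    set the environment interval, e.g. "IE2"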
def idle_work():
global last_hf_time, last_lf_time
now = time.time()
if hf_enabled and (now-last_hf_time > hf_interval):
mag = motion.magnetometer()
accel = motion.accelerometer()
h = motion.heading()
print "M%0.10f,%0.10f,%0.10f,%d,%d,%d,%0.2f"%(accel.x,accel.y,accel.z,mag.x,mag.y,mag.z,h)
last_hf_time = now
if lf_enabled and (now-last_lf_time > lf_interval):
rgb = light.rgb()
t = round(weather.temperature(),2)
p = round(weather.pressure(),2)
c = light.light()
r = rgb[0]
g = rgb[1]
b = rgb[2]
print "E%0.2f,%0.2f,%d,%d,%d,%d"%(t,p,c,r,g,b)
last_lf_time = now
def main_loop():
# while still waiting for input on at least one file
try:
while files:
ready = select.select(files, [], [], 0.01)[0]
if not ready:
idle_work()
else:
for file in ready:
if file == sys.stdin:
line = file.readline()
if not line: # EOF, remove file from input list
sys.exit(0)
elif line.rstrip(): # optional: skipping empty lines
process_command(line)
except Exception:
# A bare 'except:' would also swallow the SystemExit raised on EOF above.
sys.exit(traceback.format_exc())
try:
main_loop()
except KeyboardInterrupt:
pass
```
{
"source": "jmishra01/Custom-button-in-Matplotlib-toolbar",
"score": 3
}
#### File: Custom-button-in-Matplotlib-toolbar/custom_tool_button/common.py
```python
import os
DIRECTORY = os.path.dirname(__file__)
IMAGE_DIRECTORY = os.path.join(DIRECTORY, 'images')
XAXIS_LIST = list(range(-10, 100))
LEFT_POSI = 0
RIGHT_POSI = 10
SHIFT = 10
class Icons:
def __init__(self, dir_path):
self._dir_path = dir_path
def get_path(self) -> str:
return self._dir_path
def icon_path(self, im_path: str) -> str:
return os.path.join(self._dir_path, im_path)
def update_canvas(cls_instance):
cls_instance.canvas.draw_idle()
def fb_shift(cls_instance, x_lower_value, x_higher_value):
cls_instance.canvas.figure.gca().set_xlim(x_lower_value, x_higher_value)
update_canvas(cls_instance)
def backward_shift(cls_instance):
def wrapper():
global LEFT_POSI, RIGHT_POSI
# A negative index would silently wrap to the end of XAXIS_LIST,
# so refuse to shift past the start of the axis.
if LEFT_POSI - SHIFT < 0:
return
LEFT_POSI -= SHIFT
RIGHT_POSI -= SHIFT
fb_shift(cls_instance, XAXIS_LIST[LEFT_POSI], XAXIS_LIST[RIGHT_POSI])
return wrapper
def forward_shift(cls_instance):
def wrapper():
global LEFT_POSI, RIGHT_POSI
try:
LEFT_POSI += SHIFT
RIGHT_POSI += SHIFT
fb_shift(cls_instance, XAXIS_LIST[LEFT_POSI], XAXIS_LIST[RIGHT_POSI])
except IndexError:
LEFT_POSI -= SHIFT
RIGHT_POSI -= SHIFT
return wrapper
icons = Icons(IMAGE_DIRECTORY)
TOOLITEMS = [('Backward Shift', 'Shift chart backward', icons.icon_path('bshift.png'), backward_shift),
('Forward Shift', 'Shift chart forward', icons.icon_path('fshift.png'), forward_shift)]
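# Hypothetical consumption sketch (the toolbar hook below is an
# assumption about the rest of this repository, not a Matplotlib API):
#   for text, tooltip, image, callback_factory in TOOLITEMS:
#       toolbar.add_custom_button(text, tooltip, image,
#                                 callback_factory(toolbar))
# Each factory closes over the toolbar so the returned wrapper can move
# the x-axis window and redraw the canvas via fb_shift/update_canvas.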
```
{
"source": "jmishra01/VerticaPy",
"score": 2
}
#### File: verticapy/learn/model_selection.py
```python
import math, statistics, random, time  # math.ceil is used by gen_params_grid below
from collections.abc import Iterable
from itertools import product
import numpy as np
from typing import Union
# VerticaPy Modules
import verticapy
from verticapy import vDataFrame
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *
from verticapy.plot import gen_colors
from verticapy.learn.tools import does_model_exist
from verticapy.learn.mlplot import plot_bubble_ml, plot_stepwise_ml, plot_importance
# Other Python Modules
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# ---#
def bayesian_search_cv(
estimator,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: str = "auto",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
param_grid: Union[dict, list] = {},
random_nbins: int = 16,
bayesian_nbins: int = None,
random_grid: bool = False,
lmax: int = 15,
nrows: int = 100000,
k_tops: int = 10,
RFmodel_params: dict = {},
print_info: bool = True,
**kwargs,
):
"""
---------------------------------------------------------------------------
Computes the k-fold bayesian search of an estimator using a random
forest model to estimate a probable optimal set of parameters.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
param_grid: dict/list, optional
Dictionary of the parameters to test. It can also be a list of the
different combinations. If empty, a parameter grid will be generated.
random_nbins: int, optional
Number of bins used to compute the different parameters categories
in the random parameters generation.
bayesian_nbins: int, optional
Number of bins used to compute the different parameters categories
in the bayesian table generation.
random_grid: bool, optional
If True, the rows used to find the optimal function will be
used randomly. Otherwise, they will be regularly spaced.
lmax: int, optional
Maximum length of each parameter list.
nrows: int, optional
Number of rows to use when performing the bayesian search.
k_tops: int, optional
When performing the bayesian search, the final stage will be to retrain the top
possible combinations. 'k_tops' represents the number of models to train at
this stage to find the most efficient model.
RFmodel_params: dict, optional
Dictionary of the random forest model parameters used to estimate a probable
optimal set of parameters.
print_info: bool, optional
If True, prints the model information at each step.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if print_info:
print(f"\033[1m\033[4mStarting Bayesian Search\033[0m\033[0m\n")
print(f"\033[1m\033[4mStep 1 - Computing Random Models using Grid Search\033[0m\033[0m\n")
if not(param_grid):
param_grid = gen_params_grid(estimator, random_nbins, len(X), lmax, 0)
param_gs = grid_search_cv(
estimator,
param_grid,
input_relation,
X,
y,
metric,
cv,
pos_label,
cutoff,
True,
"no_print",
print_info,
final_print="no_print",
)
if "enet" not in kwargs:
params = []
for param_grid in param_gs["parameters"]:
params += [elem for elem in param_grid]
all_params = list(dict.fromkeys(params))
else:
all_params = ["C", "l1_ratio",]
if not(bayesian_nbins):
bayesian_nbins = max(int(np.exp(np.log(nrows) / len(all_params))), 1)
result = {}
for elem in all_params:
result[elem] = []
for param_grid in param_gs["parameters"]:
for item in all_params:
if item in param_grid:
result[item] += [param_grid[item]]
else:
result[item] += [None]
result["score"] = param_gs["avg_score"]
if 'max_features' in result:
for idx, elem in enumerate(result["max_features"]):
if elem == "auto":
result["max_features"][idx] = int(np.floor(np.sqrt(len(X))) + 1)
elif elem == "max":
result["max_features"][idx] = int(len(X))
result = tablesample(result).to_sql()
schema = verticapy.options["temp_schema"]
relation = "{}.verticapy_temp_table_bayesian_{}".format(schema, get_session(estimator.cursor))
model_name = "{}.verticapy_temp_rf_{}".format(schema, get_session(estimator.cursor))
estimator.cursor.execute("DROP TABLE IF EXISTS {}".format(relation))
estimator.cursor.execute("CREATE TABLE {} AS {}".format(relation, result))
if print_info:
print(f"\033[1m\033[4mStep 2 - Fitting the RF model with the hyperparameters data\033[0m\033[0m\n")
if verticapy.options["tqdm"] and print_info:
from tqdm.auto import tqdm
loop = tqdm(range(1))
else:
loop = range(1)
for j in loop:
if "enet" not in kwargs:
model_grid = gen_params_grid(estimator, nbins=bayesian_nbins, max_nfeatures=len(all_params), optimized_grid=-666)
else:
model_grid = {"C": {"type": float, "range": [0.0, 10], "nbins": bayesian_nbins}, "l1_ratio": {"type": float, "range": [0.0, 1.0], "nbins": bayesian_nbins},}
all_params = list(dict.fromkeys(model_grid))
from verticapy.learn.ensemble import RandomForestRegressor
hyper_param_estimator = RandomForestRegressor(name=estimator.name, cursor=estimator.cursor, **RFmodel_params,)
hyper_param_estimator.fit(relation, all_params, "score")
from verticapy.datasets import gen_meshgrid, gen_dataset
if random_grid:
vdf = gen_dataset(model_grid, estimator.cursor, nrows=nrows,)
else:
vdf = gen_meshgrid(model_grid, estimator.cursor,)
estimator.cursor.execute("DROP TABLE IF EXISTS {}".format(relation))
vdf.to_db(relation, relation_type="table", inplace=True)
vdf = hyper_param_estimator.predict(vdf, name="score")
reverse = reverse_score(metric)
vdf.sort({"score": "desc" if reverse else "asc"})
result = vdf.head(limit = k_tops)
new_param_grid = []
for i in range(k_tops):
param_tmp_grid = {}
for elem in result.values:
if elem != "score":
param_tmp_grid[elem] = result[elem][i]
new_param_grid += [param_tmp_grid]
if print_info:
print(f"\033[1m\033[4mStep 3 - Computing Most Probable Good Models using Grid Search\033[0m\033[0m\n")
result = grid_search_cv(
estimator,
new_param_grid,
input_relation,
X,
y,
metric,
cv,
pos_label,
cutoff,
True,
"no_print",
print_info,
final_print="no_print",
)
for elem in result.values:
result.values[elem] += param_gs[elem]
data = []
keys = [elem for elem in result.values]
for i in range(len(result[keys[0]])):
data += [tuple([result[elem][i] for elem in result.values])]
data.sort(key=lambda tup: tup[1], reverse=reverse)
for idx, elem in enumerate(result.values):
result.values[elem] = [item[idx] for item in data]
hyper_param_estimator.drop()
if print_info:
print("\033[1mBayesian Search Selected Model\033[0m")
print(f"Parameters: {result['parameters'][0]}; \033[91mTest_score: {result['avg_score'][0]}\033[0m; \033[92mTrain_score: {result['avg_train_score'][0]}\033[0m; \033[94mTime: {result['avg_time'][0]}\033[0m;")
estimator.cursor.execute("DROP TABLE IF EXISTS {}".format(relation))
return result
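# Usage sketch (illustrative; assumes an open Vertica cursor `cur` and a
# relation "public.titanic" with the listed columns):
#   from verticapy.learn.ensemble import RandomForestClassifier
#   model = RandomForestClassifier("rf_bayes", cursor=cur)
#   result = bayesian_search_cv(model, "public.titanic", ["age", "fare"],
#                               "survived", metric="auc", cv=3)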
# ---#
def best_k(
input_relation: Union[str, vDataFrame],
X: list = [],
cursor=None,
n_cluster: Union[tuple, list] = (1, 100),
init: Union[str, list] = "kmeanspp",
max_iter: int = 50,
tol: float = 1e-4,
elbow_score_stop: float = 0.8,
**kwargs,
):
"""
---------------------------------------------------------------------------
Finds the k-means k based on a score.
Parameters
----------
input_relation: str/vDataFrame
Relation to use to train the model.
X: list, optional
List of the predictor columns. If empty, all numerical columns will
be used.
cursor: DBcursor, optional
Vertica database cursor.
n_cluster: tuple/list, optional
Tuple representing the number of clusters to start and end with.
This can also be customized list with various k values to test.
init: str/list, optional
The method to use to find the initial cluster centers.
kmeanspp : Use the k-means++ method to initialize the centers.
random : Randomly subsamples the data to find initial centers.
It can also be a list with the initial cluster centers to use.
max_iter: int, optional
The maximum number of iterations for the algorithm.
tol: float, optional
Determines whether the algorithm has converged. The algorithm is considered
converged after no center has moved more than a distance of 'tol' from the
previous iteration.
elbow_score_stop: float, optional
Stops searching for parameters when the specified elbow score is reached.
Returns
-------
int
the KMeans K
"""
if isinstance(X, str):
X = [X]
check_types(
[
("X", X, [list],),
("input_relation", input_relation, [str, vDataFrame],),
("n_cluster", n_cluster, [list],),
("init", init, ["kmeanspp", "random"],),
("max_iter", max_iter, [int, float],),
("tol", tol, [int, float],),
("elbow_score_stop", elbow_score_stop, [int, float],),
]
)
from verticapy.learn.cluster import KMeans
cursor, conn = check_cursor(cursor, input_relation)[0:2]
if isinstance(n_cluster, tuple):
L = list(range(n_cluster[0], n_cluster[1]))
else:
L = list(n_cluster)
L.sort()
schema, relation = schema_relation(input_relation)
if not (schema):
schema = verticapy.options["temp_schema"]
schema = str_column(schema)
if verticapy.options["tqdm"] and ("tqdm" not in kwargs or ("tqdm" in kwargs and kwargs["tqdm"])):
from tqdm.auto import tqdm
loop = tqdm(L)
else:
loop = L
for i in loop:
cursor.execute(
"DROP MODEL IF EXISTS {}.__VERTICAPY_TEMP_MODEL_KMEANS_{}__".format(
schema, get_session(cursor)
)
)
model = KMeans(
"{}.__VERTICAPY_TEMP_MODEL_KMEANS_{}__".format(schema, get_session(cursor)),
cursor,
i,
init,
max_iter,
tol,
)
model.fit(input_relation, X)
score = model.metrics_.values["value"][3]
if score > elbow_score_stop:
model.drop()
if conn:
conn.close()
return i
if conn:
conn.close()
print(
"\u26A0 The K was not found. The last K (= {}) is returned with an elbow score of {}".format(
i, score
)
)
return i
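# Usage sketch (illustrative table and columns):
#   k = best_k("public.iris", ["PetalLengthCm", "PetalWidthCm"],
#              cursor=cur, n_cluster=(1, 20), elbow_score_stop=0.9)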
# ---#
def cross_validate(
estimator,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: Union[str, list] = "all",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
show_time: bool = True,
training_score: bool = False,
**kwargs,
):
"""
---------------------------------------------------------------------------
Computes the K-Fold cross validation of an estimator.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str/list, optional
Metric used to do the model evaluation. It can also be a list of metrics.
all: The model will compute all the possible metrics.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
best_cutoff : Cutoff which optimised the ROC Curve prediction.
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
aic : Akaike’s information criterion
bic : Bayesian information criterion
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
show_time: bool, optional
If set to True, the time and the average time will be added to the report.
training_score: bool, optional
If set to True, the training score will be computed with the validation score.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if isinstance(X, str):
X = [X]
check_types(
[
("X", X, [list],),
("input_relation", input_relation, [str, vDataFrame],),
("y", y, [str],),
("metric", metric, [str, list],),
("cv", cv, [int, float],),
("cutoff", cutoff, [int, float],),
]
)
if isinstance(input_relation, str):
input_relation = vdf_from_relation(input_relation, cursor=estimator.cursor)
if cv < 2:
raise ParameterError("Cross Validation is only possible with at least 2 folds")
if category_from_model_type(estimator.type)[0] == "regressor":
all_metrics = [
"explained_variance",
"max_error",
"median_absolute_error",
"mean_absolute_error",
"mean_squared_error",
"root_mean_squared_error",
"r2",
"r2_adj",
"aic",
"bic",
]
elif category_from_model_type(estimator.type)[0] == "classifier":
all_metrics = [
"auc",
"prc_auc",
"accuracy",
"log_loss",
"precision",
"recall",
"f1_score",
"mcc",
"informedness",
"markedness",
"csi",
]
else:
raise Exception(
"Cross Validation is only possible for Regressors and Classifiers"
)
if metric == "all":
final_metrics = all_metrics
elif isinstance(metric, str):
final_metrics = [metric]
else:
final_metrics = metric
result = {"index": final_metrics}
if training_score:
result_train = {"index": final_metrics}
total_time = []
if verticapy.options["tqdm"] and ("tqdm" not in kwargs or ("tqdm" in kwargs and kwargs["tqdm"])):
from tqdm.auto import tqdm
loop = tqdm(range(cv))
else:
loop = range(cv)
for i in loop:
try:
estimator.drop()
except:
pass
random_state = verticapy.options["random_state"]
random_state = (
random.randint(-10**7, 10**7) if not (random_state) else random_state + i
)
train, test = input_relation.train_test_split(
test_size=float(1 / cv), order_by=[X[0]], random_state=random_state
)
start_time = time.time()
estimator.fit(
train, X, y, test,
)
total_time += [time.time() - start_time]
if category_from_model_type(estimator.type)[0] == "regressor":
if metric == "all":
result["{}-fold".format(i + 1)] = estimator.regression_report().values[
"value"
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train[
"{}-fold".format(i + 1)
] = estimator.regression_report().values["value"]
elif isinstance(metric, str):
result["{}-fold".format(i + 1)] = [estimator.score(metric)]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [estimator.score(metric)]
else:
result["{}-fold".format(i + 1)] = [estimator.score(m) for m in metric]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(m) for m in metric
]
else:
if (len(estimator.classes_) > 2) and (pos_label not in estimator.classes_):
raise ParameterError(
"'pos_label' must be in the estimator classes, it must be the main class to study for the Cross Validation"
)
elif (len(estimator.classes_) == 2) and (
pos_label not in estimator.classes_
):
pos_label = estimator.classes_[1]
try:
if metric == "all":
result["{}-fold".format(i + 1)] = estimator.classification_report(
labels=[pos_label], cutoff=cutoff
).values["value"][0:-1]
if training_score:
estimator.test_relation = estimator.input_relation
result_train[
"{}-fold".format(i + 1)
] = estimator.classification_report(
labels=[pos_label], cutoff=cutoff
).values[
"value"
][
0:-1
]
elif isinstance(metric, str):
result["{}-fold".format(i + 1)] = [
estimator.score(metric, pos_label=pos_label, cutoff=cutoff)
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(metric, pos_label=pos_label, cutoff=cutoff)
]
else:
result["{}-fold".format(i + 1)] = [
estimator.score(m, pos_label=pos_label, cutoff=cutoff)
for m in metric
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(m, pos_label=pos_label, cutoff=cutoff)
for m in metric
]
except:
if metric == "all":
result["{}-fold".format(i + 1)] = estimator.classification_report(
cutoff=cutoff
).values["value"][0:-1]
if training_score:
estimator.test_relation = estimator.input_relation
result_train[
"{}-fold".format(i + 1)
] = estimator.classification_report(cutoff=cutoff).values[
"value"
][
0:-1
]
elif isinstance(metric, str):
result["{}-fold".format(i + 1)] = [
estimator.score(metric, cutoff=cutoff)
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(metric, cutoff=cutoff)
]
else:
result["{}-fold".format(i + 1)] = [
estimator.score(m, cutoff=cutoff) for m in metric
]
if training_score:
estimator.test_relation = estimator.input_relation
result_train["{}-fold".format(i + 1)] = [
estimator.score(m, cutoff=cutoff) for m in metric
]
try:
estimator.drop()
except:
pass
n = len(final_metrics)
total = [[] for item in range(n)]
for i in range(cv):
for k in range(n):
total[k] += [result["{}-fold".format(i + 1)][k]]
if training_score:
total_train = [[] for item in range(n)]
for i in range(cv):
for k in range(n):
total_train[k] += [result_train["{}-fold".format(i + 1)][k]]
result["avg"], result["std"] = [], []
if training_score:
result_train["avg"], result_train["std"] = [], []
for item in total:
result["avg"] += [statistics.mean([float(elem) for elem in item])]
result["std"] += [statistics.stdev([float(elem) for elem in item])]
if training_score:
for item in total_train:
result_train["avg"] += [statistics.mean([float(elem) for elem in item])]
result_train["std"] += [statistics.stdev([float(elem) for elem in item])]
total_time += [
statistics.mean([float(elem) for elem in total_time]),
statistics.stdev([float(elem) for elem in total_time]),
]
result = tablesample(values=result).transpose()
if show_time:
result.values["time"] = total_time
if training_score:
result_train = tablesample(values=result_train).transpose()
if show_time:
result_train.values["time"] = total_time
if training_score:
return result, result_train
else:
return result
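# Usage sketch (illustrative; assumes `model` is an unfitted VerticaPy
# estimator with a database cursor attached):
#   report = cross_validate(model, "public.titanic", ["age", "fare"],
#                           "survived", metric=["auc", "accuracy"], cv=5)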
# ---#
def elbow(
input_relation: Union[str, vDataFrame],
X: list = [],
cursor=None,
n_cluster: Union[tuple, list] = (1, 15),
init: Union[str, list] = "kmeanspp",
max_iter: int = 50,
tol: float = 1e-4,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws an elbow curve.
Parameters
----------
input_relation: str/vDataFrame
Relation to use to train the model.
X: list, optional
List of the predictor columns. If empty all the numerical vcolumns will
be used.
cursor: DBcursor, optional
Vertica database cursor.
n_cluster: tuple/list, optional
Tuple representing the number of clusters to start and end with.
This can also be a custom list of k values to test.
init: str/list, optional
The method to use to find the initial cluster centers.
kmeanspp : Use the k-means++ method to initialize the centers.
random : Randomly subsamples the data to find initial centers.
Alternatively, you can specify a list with the initial cluster centers.
max_iter: int, optional
The maximum number of iterations for the algorithm.
tol: float, optional
Determines whether the algorithm has converged. The algorithm is considered
converged after no center has moved more than a distance of 'tol' from the
previous iteration.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if isinstance(X, str):
X = [X]
check_types(
[
("X", X, [list],),
("input_relation", input_relation, [str, vDataFrame],),
("n_cluster", n_cluster, [list],),
("init", init, ["kmeanspp", "random"],),
("max_iter", max_iter, [int, float],),
("tol", tol, [int, float],),
]
)
cursor, conn = check_cursor(cursor, input_relation)[0:2]
version(cursor=cursor, condition=[8, 0, 0])
if isinstance(n_cluster, tuple):
L = [i for i in range(n_cluster[0], n_cluster[1])]
else:
L = list(n_cluster)
L.sort()
schema, relation = schema_relation(input_relation)
all_within_cluster_SS = []
if verticapy.options["tqdm"]:
from tqdm.auto import tqdm
loop = tqdm(L)
else:
loop = L
for i in loop:
cursor.execute(
"DROP MODEL IF EXISTS {}.VERTICAPY_KMEANS_TMP_{}".format(
schema, get_session(cursor)
)
)
from verticapy.learn.cluster import KMeans
model = KMeans(
"{}.VERTICAPY_KMEANS_TMP_{}".format(schema, get_session(cursor)),
cursor,
i,
init,
max_iter,
tol,
)
model.fit(input_relation, X)
all_within_cluster_SS += [float(model.metrics_.values["value"][3])]
model.drop()
if conn:
conn.close()
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(8, 6)
ax.grid(axis="y")
param = {
"color": gen_colors()[0],
"marker": "o",
"markerfacecolor": "white",
"markersize": 7,
"markeredgecolor": "black",
}
ax.plot(
L, all_within_cluster_SS, **updated_dict(param, style_kwds),
)
ax.set_title("Elbow Curve")
ax.set_xlabel("Number of Clusters")
ax.set_ylabel("Between-Cluster SS / Total SS")
values = {"index": L, "Within-Cluster SS": all_within_cluster_SS}
return tablesample(values=values)
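# Usage sketch (illustrative):
#   curve = elbow("public.iris", ["PetalLengthCm", "PetalWidthCm"],
#                 cursor=cur, n_cluster=(1, 10))
#   curve.values["Within-Cluster SS"]  # one score per tested k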
# ---#
def enet_search_cv(
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: str = "auto",
cv: int = 3,
estimator_type: str = "auto",
cutoff: float = -1,
cursor=None,
print_info: bool = True,
**kwargs,
):
"""
---------------------------------------------------------------------------
Computes the k-fold grid search using multiple ENet models.
Parameters
----------
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
estimator_type: str, optional
Estimator Type.
auto : detects if it is a Logit Model or ENet.
logit: Logistic Regression
enet : ElasticNet
cutoff: float, optional
The model cutoff (logit only).
cursor: DBcursor, optional
Vertica database cursor.
print_info: bool, optional
If set to True, prints the model information at each step.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types([("estimator_type", estimator_type, ["logit", "enet", "auto",]),])
cursor, conn, input_relation = check_cursor(cursor, input_relation)
param_grid = parameter_grid({"solver": ["cgd",],
"penalty": ["enet",],
"C": [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0, 5.0, 10.0, 50.0, 100.0] if "small" not in kwargs else [1e-1, 1.0, 10.0,],
"l1_ratio": [0.1 * i for i in range(0, 10)] if "small" not in kwargs else [0.1, 0.5, 0.9,]})
from verticapy.learn.linear_model import LogisticRegression, ElasticNet
if estimator_type == "auto":
if not(isinstance(input_relation, vDataFrame)):
vdf = vdf_from_relation(input_relation, cursor=cursor)
else:
vdf = input_relation
if sorted(vdf[y].distinct()) == [0, 1]:
estimator_type = "logit"
else:
estimator_type = "enet"
if estimator_type == "logit":
estimator = LogisticRegression("\"{}\".verticapy_enet_search_{}".format(verticapy.options["temp_schema"], get_session(cursor),), cursor=cursor)
else:
estimator = ElasticNet("\"{}\".verticapy_enet_search_{}".format(verticapy.options["temp_schema"], get_session(cursor),), cursor=cursor)
result = bayesian_search_cv(
estimator,
input_relation,
X,
y,
metric,
cv,
None,
cutoff,
param_grid,
random_grid=False,
bayesian_nbins=1000,
print_info=print_info,
enet=True,
)
if conn:
conn.close()
return result
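# Usage sketch (illustrative; the estimator type is inferred from the
# response column when estimator_type="auto"):
#   result = enet_search_cv("public.titanic", ["age", "fare"], "survived",
#                           metric="auto", cv=3, cursor=cur)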
# ---#
def gen_params_grid(estimator,
nbins: int = 10,
max_nfeatures: int = 3,
lmax: int = -1,
optimized_grid: int = 0,):
"""
---------------------------------------------------------------------------
Generates the estimator grid.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
nbins: int, optional
Number of bins used to discretize numerical features.
max_nfeatures: int, optional
Maximum number of features used to compute Random Forest, PCA...
lmax: int, optional
Maximum length of the parameter grid.
optimized_grid: int, optional
If set to 0, the randomness is based on the input parameters.
If set to 1, the randomness is limited to some parameters while others
are picked based on a default grid.
If set to 2, there is no randomness and a default grid is returned.
Returns
-------
dict
The generated parameter grid.
from verticapy.learn.cluster import KMeans, BisectingKMeans, DBSCAN
from verticapy.learn.decomposition import PCA, SVD
from verticapy.learn.ensemble import RandomForestRegressor, RandomForestClassifier, XGBoostRegressor, XGBoostClassifier
from verticapy.learn.linear_model import LinearRegression, ElasticNet, Lasso, Ridge, LogisticRegression
from verticapy.learn.naive_bayes import NaiveBayes
from verticapy.learn.neighbors import KNeighborsRegressor, KNeighborsClassifier, LocalOutlierFactor, NearestCentroid
from verticapy.learn.preprocessing import Normalizer, OneHotEncoder
from verticapy.learn.svm import LinearSVC, LinearSVR
from verticapy.learn.tree import DummyTreeRegressor, DummyTreeClassifier, DecisionTreeRegressor, DecisionTreeClassifier
params_grid = {}
if isinstance(estimator, (DummyTreeRegressor, DummyTreeClassifier, OneHotEncoder,)):
return params_grid
elif isinstance(estimator, (RandomForestRegressor, RandomForestClassifier, DecisionTreeRegressor, DecisionTreeClassifier,)):
if optimized_grid == 0:
params_grid = {"max_features": ["auto", "max"] + list(range(1, max_nfeatures, math.ceil(max_nfeatures / nbins))),
"max_leaf_nodes": list(range(1, int(1e9), math.ceil(int(1e9) / nbins))),
"max_depth": list(range(1, 100, math.ceil(100 / nbins))),
"min_samples_leaf": list(range(1, int(1e6), math.ceil(int(1e6) / nbins))),
"min_info_gain": [elem / 1000 for elem in range(1, 1000, math.ceil(1000 / nbins))],
"nbins": list(range(2, 100, math.ceil(100 / nbins))),}
if isinstance(estimator, (RandomForestRegressor, RandomForestClassifier,)):
params_grid["sample"] = [elem / 1000 for elem in range(1, 1000, math.ceil(1000 / nbins))]
params_grid["n_estimators"] = list(range(1, 100, math.ceil(100 / nbins)))
elif optimized_grid == 1:
params_grid = {"max_features": ["auto", "max"],
"max_leaf_nodes": [32, 64, 128, 1000, 1e4, 1e6, 1e9],
"max_depth": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 30, 40, 50],
"min_samples_leaf": [1, 2, 3, 4, 5],
"min_info_gain": [0.0, 0.1, 0.2],
"nbins": [10, 15, 20, 25, 30, 35, 40],}
if isinstance(estimator, (RandomForestRegressor, RandomForestClassifier,)):
params_grid["sample"] = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
params_grid["n_estimators"] = [1, 5, 10, 15, 20, 30, 40, 50, 100]
elif optimized_grid == 2:
params_grid = {"max_features": ["auto", "max"],
"max_leaf_nodes": [32, 64, 128, 1000,],
"max_depth": [4, 5, 6,],
"min_samples_leaf": [1, 2,],
"min_info_gain": [0.0,],
"nbins": [32,],}
if isinstance(estimator, (RandomForestRegressor, RandomForestClassifier,)):
params_grid["sample"] = [0.7,]
params_grid["n_estimators"] = [20,]
elif optimized_grid == -666:
result = {"max_features": {"type": int, "range": [1, max_nfeatures,], "nbins": nbins,},
"max_leaf_nodes": {"type": int, "range": [32, 1e9,], "nbins": nbins,},
"max_depth": {"type": int, "range": [2, 30,], "nbins": nbins,},
"min_samples_leaf": {"type": int, "range": [1, 15,], "nbins": nbins,},
"min_info_gain": {"type": float, "range": [0.0, 0.1,], "nbins": nbins,},
"nbins": {"type": int, "range": [10, 1000,], "nbins": nbins,},}
if isinstance(estimator, (RandomForestRegressor, RandomForestClassifier,)):
result["sample"] = {"type": float, "range": [0.1, 1.0,], "nbins": nbins,}
result["n_estimators"] = {"type": int, "range": [1, 100,], "nbins": nbins,}
return result
elif isinstance(estimator, (LinearSVC, LinearSVR,)):
if optimized_grid == 0:
params_grid = {"tol": [1e-4, 1e-6, 1e-8],
"C": [elem / 1000 for elem in range(1, 5000, math.ceil(5000 / nbins))],
"fit_intercept": [False, True],
"intercept_mode": ["regularized", "unregularized"],
"max_iter": [100, 500, 1000],}
elif optimized_grid == 1:
params_grid = {"tol": [1e-6],
"C": [1e-1, 0.0, 1.0, 10.0,],
"fit_intercept": [True],
"intercept_mode": ["regularized", "unregularized"],
"max_iter": [100],}
elif optimized_grid == 2:
params_grid = {"tol": [1e-6],
"C": [0.0, 1.0,],
"fit_intercept": [True],
"intercept_mode": ["regularized", "unregularized"],
"max_iter": [100],}
elif optimized_grid == -666:
return {"tol": {"type": float, "range": [1e-8, 1e-2,], "nbins": nbins,},
"C": {"type": float, "range": [0.0, 1000.0,], "nbins": nbins,},
"fit_intercept": {"type": bool,},
"intercept_mode": {"type": str, "values": ["regularized", "unregularized"]},
"max_iter": {"type": int, "range": [10, 1000,], "nbins": nbins,},}
elif isinstance(estimator, (XGBoostClassifier, XGBoostRegressor,)):
if optimized_grid == 0:
params_grid = {"nbins": list(range(2, 100, math.ceil(100 / nbins))),
"max_depth": list(range(1, 20, math.ceil(100 / nbins))),
"weight_reg": [elem / 1000 for elem in range(1, 1000, math.ceil(1000 / nbins))],
"min_split_loss": [elem / 1000 for elem in range(1, 1000, math.ceil(1000 / nbins))],
"learning_rate": [elem / 1000 for elem in range(1, 1000, math.ceil(1000 / nbins))],
#"sample": [elem / 1000 for elem in range(1, 1000, math.ceil(1000 / nbins))],
"tol": [1e-4, 1e-6, 1e-8],
"max_ntree": list(range(1, 100, math.ceil(100 / nbins)))}
elif optimized_grid == 1:
params_grid = {"nbins": [10, 15, 20, 25, 30, 35, 40],
"max_depth": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20,],
"weight_reg": [0.0, 0.5, 1.0, 2.0],
"min_split_loss": [0.0, 0.1, 0.25],
"learning_rate": [0.01, 0.05, 0.1, 1.0],
#"sample": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"tol": [1e-8],
"max_ntree": [1, 10, 20, 30, 40, 50, 100]}
elif optimized_grid == 2:
params_grid = {"nbins": [32,],
"max_depth": [3, 4, 5,],
"weight_reg": [0.0, 0.25,],
"min_split_loss": [0.0,],
"learning_rate": [0.05, 0.1, 1.0,],
#"sample": [0.5, 0.6, 0.7,],
"tol": [1e-8],
"max_ntree": [20,]}
elif optimized_grid == -666:
return {"nbins": {"type": int, "range": [2, 100,], "nbins": nbins,},
"max_depth": {"type": int, "range": [1, 20,], "nbins": nbins,},
"weight_reg": {"type": float, "range": [0.0, 1.0,], "nbins": nbins,},
"min_split_loss": {"type": float, "values": [0.0, 0.25,], "nbins": nbins,},
"learning_rate": {"type": float, "range": [0.0, 1.0,], "nbins": nbins,},
"sample": {"type": float, "range": [0.0, 1.0,], "nbins": nbins,},
"tol": {"type": float, "range": [1e-8, 1e-2,], "nbins": nbins,},
"max_ntree": {"type": int, "range": [1, 20,], "nbins": nbins,},}
elif isinstance(estimator, NaiveBayes):
if optimized_grid == 0:
params_grid = {"alpha": [elem / 1000 for elem in range(1, 1000, math.ceil(1000 / nbins))]}
elif optimized_grid == 1:
params_grid = {"alpha": [0.01, 0.1, 1.0, 5.0, 10.0,]}
elif optimized_grid == 2:
params_grid = {"alpha": [0.01, 1.0, 10.0,]}
elif optimized_grid == -666:
return {"alpha": {"type": float, "range": [0.00001, 1000.0,], "nbins": nbins,},}
elif isinstance(estimator, (PCA, SVD)):
if optimized_grid == 0:
params_grid = {"max_features": list(range(1, max_nfeatures, math.ceil(max_nfeatures / nbins))),}
if isinstance(estimator, (PCA,)):
params_grid["scale"] = [False, True]
if optimized_grid == -666:
return {"scale": {"type": bool,}, "max_features": {"type": int, "range": [1, max_nfeatures,], "nbins": nbins,},}
elif isinstance(estimator, (Normalizer,)):
params_grid = {"method": ["minmax", "robust_zscore", "zscore"]}
if optimized_grid == -666:
return {"method": {"type": str, "values": ["minmax", "robust_zscore", "zscore"]},}
elif isinstance(estimator, (KNeighborsRegressor, KNeighborsClassifier, LocalOutlierFactor, NearestCentroid,)):
if optimized_grid == 0:
params_grid = {"p": [1, 2] + list(range(3, 100, math.ceil(100 / (nbins - 2)))),}
if isinstance(estimator, (KNeighborsRegressor, KNeighborsClassifier, LocalOutlierFactor,)):
params_grid["n_neighbors"] = list(range(1, 100, math.ceil(100 / (nbins))))
elif optimized_grid == 1:
params_grid = {"p": [1, 2, 3, 4],}
if isinstance(estimator, (KNeighborsRegressor, KNeighborsClassifier, LocalOutlierFactor,)):
params_grid["n_neighbors"] = [1, 2, 3, 4, 5, 10, 20, 100]
elif optimized_grid == 2:
params_grid = {"p": [1, 2,],}
if isinstance(estimator, (KNeighborsRegressor, KNeighborsClassifier, LocalOutlierFactor,)):
params_grid["n_neighbors"] = [5, 10,]
elif optimized_grid == -666:
return {"p": {"type": int, "range": [1, 10,], "nbins": nbins,},
"n_neighbors": {"type": int, "range": [1, 100,], "nbins": nbins,},}
elif isinstance(estimator, (DBSCAN,)):
if optimized_grid == 0:
params_grid = {"p": [1, 2] + list(range(3, 100, math.ceil(100 / (nbins - 2)))),
"eps": [elem / 1000 for elem in range(1, 1000, math.ceil(1000 / nbins))],
"min_samples": list(range(1, 1000, math.ceil(1000 / nbins))),}
elif optimized_grid == 1:
params_grid = {"p": [1, 2, 3, 4],
"min_samples": [1, 2, 3, 4, 5, 10, 100],}
elif optimized_grid == 2:
params_grid = {"p": [1, 2,],
"min_samples": [5, 10,],}
elif optimized_grid == -666:
return {"p": {"type": int, "range": [1, 10,], "nbins": nbins,},
"min_samples": {"type": int, "range": [1, 100,], "nbins": nbins,},}
elif isinstance(estimator, (LogisticRegression, LinearRegression, ElasticNet, Lasso, Ridge,)):
if optimized_grid == 0:
params_grid = {"tol": [1e-4, 1e-6, 1e-8],
"max_iter": [100, 500, 1000],}
if isinstance(estimator, LogisticRegression):
params_grid["penalty"] = ["none", "l1", "l2", "enet",]
if isinstance(estimator, LinearRegression):
params_grid["solver"] = ["newton", "bfgs",]
elif isinstance(estimator, (Lasso, LogisticRegression, ElasticNet)):
params_grid["solver"] = ["newton", "bfgs", "cgd",]
if isinstance(estimator, (Lasso, Ridge, ElasticNet, LogisticRegression)):
params_grid["C"] = [elem / 1000 for elem in range(1, 5000, math.ceil(5000 / nbins))]
if isinstance(estimator, (LogisticRegression, ElasticNet)):
params_grid["l1_ratio"] = [elem / 1000 for elem in range(1, 1000, math.ceil(1000 / nbins))]
elif optimized_grid == 1:
params_grid = {"tol": [1e-6],
"max_iter": [100],}
if isinstance(estimator, LogisticRegression):
params_grid["penalty"] = ["none", "l1", "l2", "enet",]
if isinstance(estimator, LinearRegression):
params_grid["solver"] = ["newton", "bfgs",]
elif isinstance(estimator, (Lasso, LogisticRegression, ElasticNet)):
params_grid["solver"] = ["newton", "bfgs", "cgd",]
if isinstance(estimator, (Lasso, Ridge, ElasticNet, LogisticRegression)):
params_grid["C"] = [1e-1, 0.0, 1.0, 10.0,]
if isinstance(estimator, (LogisticRegression, ElasticNet)):
params_grid["l1_ratio"] = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
elif optimized_grid == 2:
params_grid = {"tol": [1e-6],
"max_iter": [100],}
if isinstance(estimator, LogisticRegression):
params_grid["penalty"] = ["none", "l1", "l2", "enet",]
if isinstance(estimator, LinearRegression):
params_grid["solver"] = ["newton", "bfgs",]
elif isinstance(estimator, (Lasso, LogisticRegression, ElasticNet)):
params_grid["solver"] = ["bfgs", "cgd",]
if isinstance(estimator, (Lasso, Ridge, ElasticNet, LogisticRegression)):
params_grid["C"] = [1.0,]
if isinstance(estimator, (LogisticRegression, ElasticNet)):
params_grid["l1_ratio"] = [0.5,]
elif optimized_grid == -666:
result = {"tol": {"type": float, "range": [1e-8, 1e-2,], "nbins": nbins,},
"max_iter": {"type": int, "range": [1, 1000,], "nbins": nbins,},}
if isinstance(estimator, LogisticRegression):
result["penalty"] = {"type": str, "values": ["none", "l1", "l2", "enet",]}
if isinstance(estimator, LinearRegression):
result["solver"] = {"type": str, "values": ["newton", "bfgs",]}
elif isinstance(estimator, (Lasso, LogisticRegression, ElasticNet)):
result["solver"] = {"type": str, "values": ["bfgs", "cgd",]}
if isinstance(estimator, (Lasso, Ridge, ElasticNet, LogisticRegression)):
result["C"] = {"type": float, "range": [0.0, 1000.0,], "nbins": nbins,}
if isinstance(estimator, (LogisticRegression, ElasticNet)):
result["l1_ratio"] = {"type": float, "range": [0.0, 1.0,], "nbins": nbins,}
return result
elif isinstance(estimator, KMeans):
if optimized_grid == 0:
params_grid = {"n_cluster": list(range(2, 100, math.ceil(100 / nbins))),
"init": ["kmeanspp", "random"],
"max_iter": [100, 500, 1000],
"tol": [1e-4, 1e-6, 1e-8],}
elif optimized_grid == 1:
params_grid = {"n_cluster": [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 50, 100, 200, 300, 1000],
"init": ["kmeanspp", "random"],
"max_iter": [1000],
"tol": [1e-8],}
elif optimized_grid == 2:
params_grid = {"n_cluster": [2, 3, 4, 5, 10, 20, 100,],
"init": ["kmeanspp",],
"max_iter": [1000],
"tol": [1e-8],}
elif optimized_grid == -666:
return {"tol": {"type": float, "range": [1e-2, 1e-8,], "nbins": nbins,},
"max_iter": {"type": int, "range": [1, 1000,], "nbins": nbins,},
"n_cluster": {"type": int, "range": [1, 10000,], "nbins": nbins,},
"init": {"type": str, "values": ["kmeanspp", "random"],},}
elif isinstance(estimator, BisectingKMeans):
if optimized_grid == 0:
params_grid = {"n_cluster": list(range(2, 100, math.ceil(100 / nbins))),
"bisection_iterations": list(range(10, 1000, math.ceil(1000 / nbins))),
"split_method": ["size", "sum_squares"],
"min_divisible_cluster_size": list(range(2, 100, math.ceil(100 / nbins))),
"init": ["kmeanspp", "pseudo"],
"max_iter": [100, 500, 1000],
"tol": [1e-4, 1e-6, 1e-8],}
elif optimized_grid == 1:
params_grid = {"n_cluster": [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 50, 100, 200, 300, 1000],
"bisection_iterations": list(range(10, 1000, math.ceil(1000 / nbins))),
"split_method": ["size", "sum_squares"],
"min_divisible_cluster_size": list(range(2, 100, math.ceil(100 / nbins))),
"init": ["kmeanspp", "pseudo"],
"max_iter": [1000],
"tol": [1e-8],}
elif optimized_grid == 2:
params_grid = {"n_cluster": [2, 3, 4, 5, 10, 20, 100,],
"bisection_iterations": [1, 2, 3,],
"split_method": ["sum_squares",],
"min_divisible_cluster_size": [2, 3, 4,],
"init": ["kmeanspp",],
"max_iter": [1000],
"tol": [1e-8],}
elif optimized_grid == -666:
return {"tol": {"type": float, "range": [1e-8, 1e-2,], "nbins": nbins,},
"max_iter": {"type": int, "range": [1, 1000,], "nbins": nbins,},
"bisection_iterations": {"type": int, "range": [1, 1000,], "nbins": nbins,},
"split_method": {"type": str, "values": ["sum_squares",],},
"n_cluster": {"type": int, "range": [1, 10000,], "nbins": nbins,},
"init": {"type": str, "values": ["kmeanspp", "pseudo"],},}
params_grid = parameter_grid(params_grid)
final_param_grid = []
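    # Post-process the raw grid: deduplicate configurations, rewrite parameter
    # combinations that Vertica's linear models reject (C == 0 means no
    # regularization, the CGD solver requires an L1/ENet penalty, and
    # 'l1_ratio' only applies to ENet-style penalties), then cap the grid at
    # 'lmax' randomly sampled combinations.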
for param in params_grid:
if "C" in param and param["C"] == 0:
del param["C"]
if "l1_ratio" in param:
del param["l1_ratio"]
if "penalty" in param:
param["penalty"] = "none"
if "penalty" in param:
if param["penalty"] in ("none", "l2") and "solver" in param and param["solver"] == "cgd":
param["solver"] = "bfgs"
if param["penalty"] in ("none", "l1", "l2") and "l1_ratio" in param:
del param["l1_ratio"]
if param["penalty"] in ("none",) and "C" in param:
del param["C"]
if param["penalty"] in ("l1", "enet",) and "solver" in param:
param["solver"] = "cgd"
if param not in final_param_grid:
final_param_grid += [param]
if len(final_param_grid) > lmax and lmax > 0:
final_param_grid = random.sample(final_param_grid, lmax)
return final_param_grid
# ---#
def grid_search_cv(
estimator,
param_grid: Union[dict, list],
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: str = "auto",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
training_score: bool = True,
skip_error: bool = True,
print_info: bool = True,
**kwargs,
):
"""
---------------------------------------------------------------------------
Computes the k-fold grid search of an estimator.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
param_grid: dict/list
Dictionary of the parameters to test. It can also be a list of the
different combinations.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
training_score: bool, optional
If set to True, the training score will be computed with the validation score.
skip_error: bool, optional
If set to True and an error occurs, it will be displayed and not raised.
print_info: bool, optional
If set to True, prints the model information at each step.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if isinstance(X, str):
X = [X]
check_types(
[
("metric", metric, [str]),
("param_grid", param_grid, [dict, list]),
("training_score", training_score, [bool]),
("skip_error", skip_error, [bool, str,]),
("print_info", print_info, [bool,]),
]
)
if category_from_model_type(estimator.type)[0] == "regressor" and metric == "auto":
metric = "rmse"
elif metric == "auto":
metric = "logloss"
if isinstance(param_grid, dict):
for param in param_grid:
assert isinstance(param_grid[param], Iterable) and not (
isinstance(param_grid[param], str)
), ParameterError(
f"When of type dictionary, the parameter 'param_grid' must be a dictionary where each value is a list of parameters, found {type(param_grid[param])} for parameter '{param}'."
)
all_configuration = parameter_grid(param_grid)
else:
for idx, param in enumerate(param_grid):
assert isinstance(param, dict), ParameterError(
f"When of type List, the parameter 'param_grid' must be a list of dictionaries, found {type(param)} for elem '{idx}'."
)
all_configuration = param_grid
    # Validate all configurations up front: set_params raises early on
    # malformed parameter dictionaries, before any expensive cross-validation.
    for config in all_configuration:
        estimator.set_params(config)
    # Evaluate each configuration with k-fold cross-validation.
data = []
if all_configuration == []:
all_configuration = [{}]
if verticapy.options["tqdm"] and ("tqdm" not in kwargs or ("tqdm" in kwargs and kwargs["tqdm"])) and print_info:
from tqdm.auto import tqdm
loop = tqdm(all_configuration)
else:
loop = all_configuration
for config in loop:
try:
estimator.set_params(config)
current_cv = cross_validate(
estimator,
input_relation,
X,
y,
metric,
cv,
pos_label,
cutoff,
True,
training_score,
tqdm=False,
)
if training_score:
keys = [elem for elem in current_cv[0].values]
data += [
(
estimator.get_params(),
current_cv[0][keys[1]][cv],
current_cv[1][keys[1]][cv],
current_cv[0][keys[2]][cv],
current_cv[0][keys[1]][cv + 1],
current_cv[1][keys[1]][cv + 1],
)
]
if print_info:
print(f"Model: {str(estimator.__class__).split('.')[-1][:-2]}; Parameters: {config}; \033[91mTest_score: {current_cv[0][keys[1]][cv]}\033[0m; \033[92mTrain_score: {current_cv[1][keys[1]][cv]}\033[0m; \033[94mTime: {current_cv[0][keys[2]][cv]}\033[0m;")
else:
keys = [elem for elem in current_cv.values]
data += [
(
config,
current_cv[keys[1]][cv],
current_cv[keys[2]][cv],
current_cv[keys[1]][cv + 1],
)
]
if print_info:
print(f"Model: {str(estimator.__class__).split('.')[-1][:-2]}; Parameters: {config}; \033[91mTest_score: {current_cv[keys[1]][cv]}\033[0m; \033[94mTime:{current_cv[keys[2]][cv]}\033[0m;")
except Exception as e:
if skip_error and skip_error != "no_print":
print(e)
            elif not skip_error:
                raise e
    if not data:
if training_score:
return tablesample(
{
"parameters": [],
"avg_score": [],
"avg_train_score": [],
"avg_time": [],
"score_std": [],
"score_train_std": [],
}
)
else:
return tablesample(
{
"parameters": [],
"avg_score": [],
"avg_time": [],
"score_std": [],
}
)
reverse = reverse_score(metric)
data.sort(key=lambda tup: tup[1], reverse=reverse)
if training_score:
result = tablesample(
{
"parameters": [elem[0] for elem in data],
"avg_score": [elem[1] for elem in data],
"avg_train_score": [elem[2] for elem in data],
"avg_time": [elem[3] for elem in data],
"score_std": [elem[4] for elem in data],
"score_train_std": [elem[5] for elem in data],
}
)
if print_info and ("final_print" not in kwargs or kwargs["final_print"] != "no_print"):
print("\033[1mGrid Search Selected Model\033[0m")
print(f"{str(estimator.__class__).split('.')[-1][:-2]}; Parameters: {result['parameters'][0]}; \033[91mTest_score: {result['avg_score'][0]}\033[0m; \033[92mTrain_score: {result['avg_train_score'][0]}\033[0m; \033[94mTime: {result['avg_time'][0]}\033[0m;")
else:
result = tablesample(
{
"parameters": [elem[0] for elem in data],
"avg_score": [elem[1] for elem in data],
"avg_time": [elem[2] for elem in data],
"score_std": [elem[3] for elem in data],
}
)
if print_info and ("final_print" not in kwargs or kwargs["final_print"] != "no_print"):
print("\033[1mGrid Search Selected Model\033[0m")
print(f"{str(estimator.__class__).split('.')[-1][:-2]}; Parameters: {result['parameters'][0]}; \033[91mTest_score: {result['avg_score'][0]}\033[0m; \033[94mTime: {result['avg_time'][0]}\033[0m;")
return result
# ---#
def learning_curve(
estimator,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
sizes: list = [0.1, 0.33, 0.55, 0.78, 1.0],
method="efficiency",
metric: str = "auto",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
std_coeff: float = 1,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the learning curve.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
sizes: list, optional
Different sizes of the dataset used to train the model. Multiple models
will be trained using the different sizes.
method: str, optional
Method used to plot the curve.
efficiency : draws train/test score vs sample size.
performance : draws score vs time.
scalability : draws time vs sample size.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
std_coeff: float, optional
Value of the standard deviation coefficient used to compute the area plot
around each score.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[("method", method, ["efficiency", "performance", "scalability"],),]
)
from verticapy.plot import range_curve
for s in sizes:
assert 0 < s <= 1, ParameterError("Each size must be in ]0,1].")
if category_from_model_type(estimator.type)[0] == "regressor" and metric == "auto":
metric = "rmse"
elif metric == "auto":
metric = "logloss"
if isinstance(input_relation, str):
input_relation = vdf_from_relation(input_relation, cursor=estimator.cursor)
lc_result_final = []
sizes = sorted(set(sizes))
if verticapy.options["tqdm"]:
from tqdm.auto import tqdm
loop = tqdm(sizes)
else:
loop = sizes
for s in loop:
relation = input_relation.sample(x=s)
lc_result = cross_validate(
estimator, relation, X, y, metric, cv, pos_label, cutoff, True, True, tqdm=False,
)
lc_result_final += [
(
relation.shape()[0],
lc_result[0][metric][cv],
lc_result[0][metric][cv + 1],
lc_result[1][metric][cv],
lc_result[1][metric][cv + 1],
lc_result[0]["time"][cv],
lc_result[0]["time"][cv + 1],
)
]
if method in ("efficiency", "scalability"):
lc_result_final.sort(key=lambda tup: tup[0])
else:
lc_result_final.sort(key=lambda tup: tup[5])
result = tablesample(
{
"n": [elem[0] for elem in lc_result_final],
metric: [elem[1] for elem in lc_result_final],
metric + "_std": [elem[2] for elem in lc_result_final],
metric + "_train": [elem[3] for elem in lc_result_final],
metric + "_train_std": [elem[4] for elem in lc_result_final],
"time": [elem[5] for elem in lc_result_final],
"time_std": [elem[6] for elem in lc_result_final],
}
)
if method == "efficiency":
X = result["n"]
Y = [
[
[
result[metric][i] - std_coeff * result[metric + "_std"][i]
for i in range(len(sizes))
],
result[metric],
[
result[metric][i] + std_coeff * result[metric + "_std"][i]
for i in range(len(sizes))
],
],
[
[
result[metric + "_train"][i]
- std_coeff * result[metric + "_train_std"][i]
for i in range(len(sizes))
],
result[metric + "_train"],
[
result[metric + "_train"][i]
+ std_coeff * result[metric + "_train_std"][i]
for i in range(len(sizes))
],
],
]
x_label = "n"
y_label = metric
labels = [
"test",
"train",
]
elif method == "performance":
X = result["time"]
Y = [
[
[
result[metric][i] - std_coeff * result[metric + "_std"][i]
for i in range(len(sizes))
],
result[metric],
[
result[metric][i] + std_coeff * result[metric + "_std"][i]
for i in range(len(sizes))
],
],
]
x_label = "time"
y_label = metric
labels = []
else:
X = result["n"]
Y = [
[
[
result["time"][i] - std_coeff * result["time_std"][i]
for i in range(len(sizes))
],
result["time"],
[
result["time"][i] + std_coeff * result["time_std"][i]
for i in range(len(sizes))
],
],
]
x_label = "n"
y_label = "time"
labels = []
range_curve(
X, Y, x_label, y_label, ax, labels, **style_kwds,
)
return result
# ---#
def lift_chart(
y_true: str,
y_score: str,
input_relation: Union[str, vDataFrame],
cursor=None,
pos_label: Union[int, float, str] = 1,
nbins: int = 30,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the Lift Chart.
Parameters
----------
y_true: str
Response column.
y_score: str
Prediction Probability.
input_relation: str/vDataFrame
Relation to use to do the scoring. The relation can be a view or a table
or even a customized relation. For example, you could write:
"(SELECT ... FROM ...) x" as long as an alias is given at the end of the
relation.
cursor: DBcursor, optional
Vertica database cursor.
pos_label: int/float/str, optional
To compute the Lift Chart, one of the response column classes must be the
positive one. The parameter 'pos_label' represents this class.
nbins: int, optional
An integer value that determines the number of decision boundaries. Decision
boundaries are set at equally-spaced intervals between 0 and 1, inclusive.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("y_true", y_true, [str],),
("y_score", y_score, [str],),
("input_relation", input_relation, [str, vDataFrame],),
("nbins", nbins, [int, float],),
]
)
cursor, conn, input_relation = check_cursor(cursor, input_relation)
version(cursor=cursor, condition=[8, 0, 0])
query = "SELECT LIFT_TABLE(obs, prob USING PARAMETERS num_bins = {}) OVER() FROM (SELECT (CASE WHEN {} = '{}' THEN 1 ELSE 0 END) AS obs, {}::float AS prob FROM {}) AS prediction_output"
query = query.format(nbins, y_true, pos_label, y_score, input_relation)
executeSQL(cursor, query, "Computing the Lift Table.")
query_result = cursor.fetchall()
if conn:
conn.close()
decision_boundary, positive_prediction_ratio, lift = (
[item[0] for item in query_result],
[item[1] for item in query_result],
[item[2] for item in query_result],
)
decision_boundary.reverse()
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(8, 6)
ax.set_xlabel("Cumulative Data Fraction")
max_value = max([0 if elem != elem else elem for elem in lift])
lift = [max_value if elem != elem else elem for elem in lift]
param1 = {"color": gen_colors()[0]}
ax.plot(
decision_boundary, lift, **updated_dict(param1, style_kwds, 0),
)
param2 = {"color": gen_colors()[1]}
ax.plot(
decision_boundary,
positive_prediction_ratio,
**updated_dict(param2, style_kwds, 1),
)
color1, color2 = color_dict(style_kwds, 0), color_dict(style_kwds, 1)
if color1 == color2:
color2 = gen_colors()[1]
ax.fill_between(
decision_boundary, positive_prediction_ratio, lift, facecolor=color1, alpha=0.2
)
ax.fill_between(
decision_boundary,
[0 for elem in decision_boundary],
positive_prediction_ratio,
facecolor=color2,
alpha=0.2,
)
ax.set_title("Lift Table")
ax.set_axisbelow(True)
ax.grid()
color1 = mpatches.Patch(color=color1, label="Cumulative Lift")
color2 = mpatches.Patch(color=color2, label="Cumulative Capture Rate")
ax.legend(handles=[color1, color2], loc="center left", bbox_to_anchor=[1, 0.5])
ax.set_xlim(0, 1)
ax.set_ylim(0)
return tablesample(
values={
"decision_boundary": decision_boundary,
"positive_prediction_ratio": positive_prediction_ratio,
"lift": lift,
},
)
# ---#
def parameter_grid(param_grid: dict,):
"""
---------------------------------------------------------------------------
Generates the list of the different combinations of input parameters.
Parameters
----------
param_grid: dict
Dictionary of parameters.
Returns
-------
list of dict
List of the different combinations.
"""
check_types([("param_grid", param_grid, [dict]),])
return [dict(zip(param_grid.keys(), values)) for values in product(*param_grid.values())]
# ---#
def plot_acf_pacf(
vdf: vDataFrame,
column: str,
ts: str,
by: list = [],
p: Union[int, list] = 15,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the ACF and PACF Charts.
Parameters
----------
vdf: vDataFrame
Input vDataFrame.
column: str
Response column.
ts: str
        vcolumn used as timeline. It will be used to order the data. It can
        be a numerical or date-like (date, datetime, timestamp...) vcolumn.
by: list, optional
vcolumns used in the partition.
p: int/list, optional
        Integer equal to the maximum number of lags to consider during the
        computation, or list of the different lags to include during the
        computation. p must be a positive integer or a list of positive
        integers.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if isinstance(by, str):
by = [by]
check_types(
[
("column", column, [str],),
("ts", ts, [str],),
("by", by, [list],),
("p", p, [int, float],),
("vdf", vdf, [vDataFrame,],),
]
)
tmp_style = {}
for elem in style_kwds:
if elem not in ("color", "colors"):
tmp_style[elem] = style_kwds[elem]
if "color" in style_kwds:
color = style_kwds["color"]
else:
color = gen_colors()[0]
columns_check([column, ts] + by, vdf)
by = vdf_columns_names(by, vdf)
column, ts = vdf_columns_names([column, ts], vdf)
acf = vdf.acf(ts=ts, column=column, by=by, p=p, show=False)
pacf = vdf.pacf(ts=ts, column=column, by=by, p=p, show=False)
result = tablesample(
{
"index": [i for i in range(0, len(acf.values["value"]))],
"acf": acf.values["value"],
"pacf": pacf.values["value"],
"confidence": pacf.values["confidence"],
},
)
    fig = plt.figure(figsize=(10, 6))
plt.rcParams["axes.facecolor"] = "#FCFCFC"
ax1 = fig.add_subplot(211)
x, y, confidence = (
result.values["index"],
result.values["acf"],
result.values["confidence"],
)
plt.xlim(-1, x[-1] + 1)
ax1.bar(
x, y, width=0.007 * len(x), color="#444444", zorder=1, linewidth=0,
)
param = {
"s": 90,
"marker": "o",
"facecolors": color,
"edgecolors": "black",
"zorder": 2,
}
ax1.scatter(
x, y, **updated_dict(param, tmp_style,),
)
ax1.plot(
[-1] + x + [x[-1] + 1],
[0 for elem in range(len(x) + 2)],
color=color,
zorder=0,
)
ax1.fill_between(x, confidence, color="#FE5016", alpha=0.1)
ax1.fill_between(x, [-elem for elem in confidence], color="#FE5016", alpha=0.1)
ax1.set_title("Autocorrelation")
y = result.values["pacf"]
ax2 = fig.add_subplot(212)
ax2.bar(x, y, width=0.007 * len(x), color="#444444", zorder=1, linewidth=0)
ax2.scatter(
x, y, **updated_dict(param, tmp_style,),
)
ax2.plot(
[-1] + x + [x[-1] + 1],
[0 for elem in range(len(x) + 2)],
color=color,
zorder=0,
)
ax2.fill_between(x, confidence, color="#FE5016", alpha=0.1)
ax2.fill_between(x, [-elem for elem in confidence], color="#FE5016", alpha=0.1)
ax2.set_title("Partial Autocorrelation")
plt.show()
return result
# ---#
def prc_curve(
y_true: str,
y_score: str,
input_relation: Union[str, vDataFrame],
cursor=None,
pos_label: Union[int, float, str] = 1,
nbins: int = 30,
auc_prc: bool = False,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the PRC Curve.
Parameters
----------
y_true: str
Response column.
y_score: str
Prediction Probability.
input_relation: str/vDataFrame
Relation to use to do the scoring. The relation can be a view or a table
or even a customized relation. For example, you could write:
"(SELECT ... FROM ...) x" as long as an alias is given at the end of the
relation.
cursor: DBcursor, optional
Vertica database cursor.
pos_label: int/float/str, optional
To compute the PRC Curve, one of the response column classes must be the
positive one. The parameter 'pos_label' represents this class.
nbins: int, optional
An integer value that determines the number of decision boundaries. Decision
boundaries are set at equally-spaced intervals between 0 and 1, inclusive.
auc_prc: bool, optional
If set to True, the function will return the PRC AUC without drawing the
curve.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("y_true", y_true, [str],),
("y_score", y_score, [str],),
("input_relation", input_relation, [str, vDataFrame],),
("nbins", nbins, [int, float],),
("auc_prc", auc_prc, [bool],),
]
)
if nbins < 0:
nbins = 999999
cursor, conn, input_relation = check_cursor(cursor, input_relation)
version(cursor=cursor, condition=[9, 1, 0])
query = "SELECT PRC(obs, prob USING PARAMETERS num_bins = {}) OVER() FROM (SELECT (CASE WHEN {} = '{}' THEN 1 ELSE 0 END) AS obs, {}::float AS prob FROM {}) AS prediction_output"
query = query.format(nbins, y_true, pos_label, y_score, input_relation)
executeSQL(cursor, query, "Computing the PRC table.")
query_result = cursor.fetchall()
if conn:
conn.close()
threshold, recall, precision = (
[0] + [item[0] for item in query_result] + [1],
[1] + [item[1] for item in query_result] + [0],
[0] + [item[2] for item in query_result] + [1],
)
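    # Integrate precision over recall piecewise-linearly: on each segment the
    # curve is modeled as precision = a * recall + b, whose integral between
    # recall[i] and recall[i + 1] is a * (r2^2 - r1^2) / 2 + b * (r2 - r1).
    # Recall decreases along the curve, so the accumulated area is negated.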
auc = 0
for i in range(len(recall) - 1):
if recall[i + 1] - recall[i] != 0.0:
a = (precision[i + 1] - precision[i]) / (recall[i + 1] - recall[i])
b = precision[i + 1] - a * recall[i + 1]
auc = (
auc
+ a * (recall[i + 1] * recall[i + 1] - recall[i] * recall[i]) / 2
+ b * (recall[i + 1] - recall[i])
)
auc = -auc
if auc_prc:
return auc
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(8, 6)
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
param = {"color": color_dict(style_kwds, 0)}
ax.plot(recall, precision, **updated_dict(param, style_kwds))
ax.fill_between(
recall,
[0 for item in recall],
precision,
facecolor=color_dict(style_kwds, 0),
alpha=0.1,
)
ax.set_ylim(0, 1)
ax.set_xlim(0, 1)
ax.set_title("PRC Curve")
ax.text(
0.995,
0,
"AUC = " + str(round(auc, 4) * 100) + "%",
verticalalignment="bottom",
horizontalalignment="right",
fontsize=11.5,
)
ax.set_axisbelow(True)
ax.grid()
return tablesample(
values={"threshold": threshold, "recall": recall, "precision": precision},
)
# ---#
def randomized_features_search_cv(
estimator,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: str = "auto",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
training_score: bool = True,
comb_limit: int = 100,
skip_error: bool = True,
print_info: bool = True,
**kwargs,
):
"""
---------------------------------------------------------------------------
Computes the k-fold grid search of an estimator using different features
combinations. It can be used to find the parameters which will optimize
the model.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
training_score: bool, optional
If set to True, the training score will be computed with the validation score.
comb_limit: int, optional
Maximum number of features combinations used to train the model.
skip_error: bool, optional
If set to True and an error occurs, it will be displayed and not raised.
print_info: bool, optional
If set to True, prints the model information at each step.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if isinstance(X, str):
X = [X]
check_types(
[
("metric", metric, [str]),
("training_score", training_score, [bool]),
("skip_error", skip_error, [bool, str,]),
("print_info", print_info, [bool,]),
("comb_limit", comb_limit, [int,]),
]
)
if category_from_model_type(estimator.type)[0] == "regressor" and metric == "auto":
metric = "rmse"
elif metric == "auto":
metric = "logloss"
if len(X) < 20:
all_configuration = all_comb(X)
if len(all_configuration) > comb_limit and comb_limit > 0:
all_configuration = random.sample(all_configuration, comb_limit)
else:
all_configuration = []
for k in range(max(comb_limit, 1)):
config = sorted(random.sample(X, random.randint(1, len(X))))
if config not in all_configuration:
all_configuration += [config]
if verticapy.options["tqdm"] and ("tqdm" not in kwargs or ("tqdm" in kwargs and kwargs["tqdm"])) and print_info:
from tqdm.auto import tqdm
loop = tqdm(all_configuration)
else:
loop = all_configuration
data = []
for config in loop:
if config:
config = list(config)
try:
current_cv = cross_validate(
estimator,
input_relation,
config,
y,
metric,
cv,
pos_label,
cutoff,
True,
training_score,
tqdm=False,
)
if training_score:
keys = [elem for elem in current_cv[0].values]
data += [
(
config,
current_cv[0][keys[1]][cv],
current_cv[1][keys[1]][cv],
current_cv[0][keys[2]][cv],
current_cv[0][keys[1]][cv + 1],
current_cv[1][keys[1]][cv + 1],
)
]
if print_info:
print(f"Model: {str(estimator.__class__).split('.')[-1][:-2]}; Features: {config}; \033[91mTest_score: {current_cv[0][keys[1]][cv]}\033[0m; \033[92mTrain_score: {current_cv[1][keys[1]][cv]}\033[0m; \033[94mTime: {current_cv[0][keys[2]][cv]}\033[0m;")
else:
keys = [elem for elem in current_cv.values]
data += [
(
config,
current_cv[keys[1]][cv],
current_cv[keys[2]][cv],
current_cv[keys[1]][cv + 1],
)
]
if print_info:
print(f"Model: {str(estimator.__class__).split('.')[-1][:-2]}; Features: {config}; \033[91mTest_score: {current_cv[keys[1]][cv]}\033[0m; \033[94mTime:{current_cv[keys[2]][cv]}\033[0m;")
except Exception as e:
if skip_error and skip_error != "no_print":
print(e)
                elif not skip_error:
                    raise e
    if not data:
if training_score:
return tablesample(
{
"parameters": [],
"avg_score": [],
"avg_train_score": [],
"avg_time": [],
"score_std": [],
"score_train_std": [],
}
)
else:
return tablesample(
{
"parameters": [],
"avg_score": [],
"avg_time": [],
"score_std": [],
}
)
reverse = reverse_score(metric)
data.sort(key=lambda tup: tup[1], reverse=reverse)
if training_score:
result = tablesample(
{
"features": [elem[0] for elem in data],
"avg_score": [elem[1] for elem in data],
"avg_train_score": [elem[2] for elem in data],
"avg_time": [elem[3] for elem in data],
"score_std": [elem[4] for elem in data],
"score_train_std": [elem[5] for elem in data],
}
)
if print_info and ("final_print" not in kwargs or kwargs["final_print"] != "no_print"):
print("\033[1mRandomized Features Search Selected Model\033[0m")
print(f"{str(estimator.__class__).split('.')[-1][:-2]}; Features: {result['features'][0]}; \033[91mTest_score: {result['avg_score'][0]}\033[0m; \033[92mTrain_score: {result['avg_train_score'][0]}\033[0m; \033[94mTime: {result['avg_time'][0]}\033[0m;")
else:
result = tablesample(
{
"features": [elem[0] for elem in data],
"avg_score": [elem[1] for elem in data],
"avg_time": [elem[2] for elem in data],
"score_std": [elem[3] for elem in data],
}
)
if print_info and ("final_print" not in kwargs or kwargs["final_print"] != "no_print"):
print("\033[1mRandomized Features Search Selected Model\033[0m")
print(f"{str(estimator.__class__).split('.')[-1][:-2]}; Features: {result['features'][0]}; \033[91mTest_score: {result['avg_score'][0]}\033[0m; \033[94mTime: {result['avg_time'][0]}\033[0m;")
return result
# ---#
def randomized_search_cv(
estimator,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: str = "auto",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
nbins: int = 1000,
lmax: int = 4,
optimized_grid: int = 1,
print_info: bool = True,
):
"""
---------------------------------------------------------------------------
Computes the K-Fold randomized search of an estimator.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
nbins: int, optional
        Number of bins used to compute the different parameter categories.
lmax: int, optional
Maximum length of each parameter list.
optimized_grid: int, optional
If set to 0, the randomness is based on the input parameters.
If set to 1, the randomness is limited to some parameters while others
are picked based on a default grid.
If set to 2, there is no randomness and a default grid is returned.
print_info: bool, optional
If set to True, prints the model information at each step.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
param_grid = gen_params_grid(estimator, nbins, len(X), lmax, optimized_grid)
return grid_search_cv(
estimator,
param_grid,
input_relation,
X,
y,
metric,
cv,
pos_label,
cutoff,
True,
"no_print",
print_info,
)
# ---#
def roc_curve(
y_true: str,
y_score: str,
input_relation: Union[str, vDataFrame],
cursor=None,
pos_label: Union[int, float, str] = 1,
nbins: int = 30,
auc_roc: bool = False,
best_threshold: bool = False,
cutoff_curve: bool = False,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the ROC Curve.
Parameters
----------
y_true: str
Response column.
y_score: str
Prediction Probability.
input_relation: str/vDataFrame
Relation to use to do the scoring. The relation can be a view or a table
or even a customized relation. For example, you could write:
"(SELECT ... FROM ...) x" as long as an alias is given at the end of the
relation.
cursor: DBcursor, optional
Vertica database cursor.
pos_label: int/float/str, optional
        To compute the ROC Curve, one of the response column classes must be the
positive one. The parameter 'pos_label' represents this class.
nbins: int, optional
An integer value that determines the number of decision boundaries. Decision
boundaries are set at equally-spaced intervals between 0 and 1, inclusive.
auc_roc: bool, optional
        If set to True, the function will return the ROC AUC without drawing the
curve.
best_threshold: bool, optional
If set to True, the function will return the best threshold without drawing
        the curve. The best threshold is the threshold of the point that is
        the farthest from the random line.
cutoff_curve: bool, optional
If set to True, the Cutoff curve will be drawn.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
check_types(
[
("y_true", y_true, [str],),
("y_score", y_score, [str],),
("input_relation", input_relation, [str, vDataFrame],),
("nbins", nbins, [int, float],),
("auc_roc", auc_roc, [bool],),
("best_threshold", best_threshold, [bool],),
("cutoff_curve", cutoff_curve, [bool],),
]
)
if nbins < 0:
nbins = 999999
cursor, conn, input_relation = check_cursor(cursor, input_relation)
version(cursor=cursor, condition=[8, 0, 0])
query = "SELECT decision_boundary, false_positive_rate, true_positive_rate FROM (SELECT ROC(obs, prob USING PARAMETERS num_bins = {}) OVER() FROM (SELECT (CASE WHEN {} = '{}' THEN 1 ELSE 0 END) AS obs, {}::float AS prob FROM {}) AS prediction_output) x"
query = query.format(nbins, y_true, pos_label, y_score, input_relation)
executeSQL(cursor, query, "Computing the ROC Table.")
query_result = cursor.fetchall()
if conn:
conn.close()
threshold, false_positive, true_positive = (
[item[0] for item in query_result],
[item[1] for item in query_result],
[item[2] for item in query_result],
)
auc = 0
for i in range(len(false_positive) - 1):
if false_positive[i + 1] - false_positive[i] != 0.0:
a = (true_positive[i + 1] - true_positive[i]) / (
false_positive[i + 1] - false_positive[i]
)
b = true_positive[i + 1] - a * false_positive[i + 1]
auc = (
auc
+ a
* (
false_positive[i + 1] * false_positive[i + 1]
- false_positive[i] * false_positive[i]
)
/ 2
+ b * (false_positive[i + 1] - false_positive[i])
)
auc = -auc
auc = min(auc, 1.0)
if auc_roc:
return auc
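    # The best threshold maximizes |TPR - FPR| (Youden's J statistic), i.e.
    # the point on the ROC curve farthest from the random-classifier diagonal;
    # the result is then clamped to [0.001, 0.999].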
if best_threshold:
        distances = [abs(y - x) for x, y in zip(false_positive, true_positive)]
        best_threshold_arg = max(zip(distances, range(len(distances))))[1]
best = max(threshold[best_threshold_arg], 0.001)
best = min(best, 0.999)
return best
if not (ax):
fig, ax = plt.subplots()
if isnotebook():
fig.set_size_inches(8, 6)
color1, color2 = color_dict(style_kwds, 0), color_dict(style_kwds, 1)
if color1 == color2:
color2 = gen_colors()[1]
if cutoff_curve:
ax.plot(
threshold,
[1 - item for item in false_positive],
label="Specificity",
**updated_dict({"color": gen_colors()[0]}, style_kwds),
)
ax.plot(
threshold,
true_positive,
label="Sensitivity",
**updated_dict({"color": gen_colors()[1]}, style_kwds),
)
ax.fill_between(
threshold,
[1 - item for item in false_positive],
true_positive,
facecolor="black",
alpha=0.02,
)
ax.set_xlabel("Decision Boundary")
ax.set_title("Cutoff Curve")
ax.legend(loc="center left", bbox_to_anchor=[1, 0.5])
else:
ax.set_xlabel("False Positive Rate (1-Specificity)")
ax.set_ylabel("True Positive Rate (Sensitivity)")
ax.plot(
false_positive,
true_positive,
**updated_dict({"color": gen_colors()[0]}, style_kwds),
)
ax.fill_between(
false_positive, false_positive, true_positive, facecolor=color1, alpha=0.1
)
ax.fill_between([0, 1], [0, 0], [0, 1], facecolor=color2, alpha=0.1)
ax.plot([0, 1], [0, 1], color=color2)
ax.set_title("ROC Curve")
ax.text(
0.995,
0,
"AUC = " + str(round(auc, 4) * 100) + "%",
verticalalignment="bottom",
horizontalalignment="right",
fontsize=11.5,
)
ax.set_ylim(0, 1)
ax.set_xlim(0, 1)
ax.set_axisbelow(True)
ax.grid()
return tablesample(
values={
"threshold": threshold,
"false_positive": false_positive,
"true_positive": true_positive,
},
)
# ---#
def stepwise(
estimator,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
criterion: str = "bic",
direction: str = "backward",
max_steps: int = 100,
criterion_threshold: int = 3,
drop_final_estimator: bool = True,
x_order: str = "pearson",
print_info: bool = True,
show: bool = True,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Uses the Stepwise algorithm to find the most suitable number of features
when fitting the estimator.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
criterion: str, optional
Criterion used to evaluate the model.
aic : Akaike’s Information Criterion
bic : Bayesian Information Criterion
direction: str, optional
        How to start the stepwise search. Can be 'backward' or 'forward'.
max_steps: int, optional
The maximum number of steps to be considered.
criterion_threshold: int, optional
        Threshold used when comparing the models' criteria. If the difference
        is less than the threshold, the current 'best' model is changed.
drop_final_estimator: bool, optional
If set to True, the final estimator will be dropped.
x_order: str, optional
How to preprocess X before using the stepwise algorithm.
pearson : X is ordered based on the Pearson's correlation coefficient.
spearman : X is ordered based on the Spearman's correlation coefficient.
random : Shuffles the vector X before applying the stepwise algorithm.
none : Does not change the order of X.
print_info: bool, optional
If set to True, prints the model information at each step.
show: bool, optional
If set to True, the stepwise graphic will be drawn.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
from verticapy.learn.metrics import aic_bic
if isinstance(X, str):
X = [X]
if isinstance(x_order, str):
x_order = x_order.lower()
assert len(X) >= 1, ParameterError("Vector X must have at least one element.")
check_types(
[
("criterion", criterion, ["aic", "bic",]),
("direction", direction, ["forward", "backward",]),
("max_steps", max_steps, [int, float,]),
("print_info", print_info, [bool,]),
("x_order", x_order, ["pearson", "spearman", "random", "none",]),
]
)
does_model_exist(name=estimator.name, cursor=estimator.cursor, raise_error=True)
result, current_step = [], 0
table = input_relation if isinstance(input_relation, str) else input_relation.__genSQL__()
estimator.cursor.execute(f"SELECT AVG({y}) FROM {table}")
avg = estimator.cursor.fetchone()[0]
k = 0 if criterion == "aic" else 1
if x_order == "random":
random.shuffle(X)
elif x_order in ("spearman", "pearson"):
if isinstance(input_relation, str):
vdf = vdf_from_relation(input_relation, cursor=estimator.cursor)
else:
vdf = input_relation
X = [elem for elem in vdf.corr(method=x_order, focus=y, columns=X, show=False,)["index"]]
if direction == "backward":
X.reverse()
if print_info:
print("\033[1m\033[4mStarting Stepwise\033[0m\033[0m")
if verticapy.options["tqdm"] and print_info:
from tqdm.auto import tqdm
loop = tqdm(range(len(X)))
else:
loop = range(len(X))
model_id = 0
if direction == "backward":
X_current = [elem for elem in X]
estimator.drop()
estimator.fit(input_relation, X, y)
current_score = estimator.score(criterion)
result += [(X_current, current_score, None, None, 0, None)]
for idx in loop:
if print_info and idx == 0:
print(f"\033[1m[Model 0]\033[0m \033[92m{criterion}: {current_score}\033[0m; Variables: {X_current}")
if current_step >= max_steps:
break
X_test = [elem for elem in X_current]
X_test.remove(X[idx])
if len(X_test) != 0:
estimator.drop()
estimator.fit(input_relation, X_test, y)
test_score = estimator.score(criterion,)
else:
test_score = aic_bic(y, str(avg), input_relation, estimator.cursor, 0)[k]
score_diff = test_score - current_score
            if score_diff < criterion_threshold:
sign = "-"
model_id += 1
current_score = test_score
X_current = [elem for elem in X_test]
if print_info:
print(f"\033[1m[Model {model_id}]\033[0m \033[92m{criterion}: {test_score}\033[0m; \033[91m(-) Variable: {X[idx]}\033[0m")
else:
sign = "+"
result += [(X_test, test_score, sign, X[idx], idx + 1, score_diff)]
current_step += 1
else:
X_current = []
current_score = aic_bic(y, str(avg), input_relation, estimator.cursor, 0)[k]
result += [(X_current, current_score, None, None, 0, None)]
for idx in loop:
if print_info and idx == 0:
print(f"\033[1m[Model 0]\033[0m \033[92m{criterion}: {current_score}\033[0m; Variables: {X_current}")
if current_step >= max_steps:
break
X_test = [elem for elem in X_current] + [X[idx]]
estimator.drop()
estimator.fit(input_relation, X_test, y)
test_score = estimator.score(criterion,)
score_diff = current_score - test_score
            if score_diff > criterion_threshold:
sign = "+"
model_id += 1
current_score = test_score
X_current = [elem for elem in X_test]
if print_info:
print(f"\033[1m[Model {model_id}]\033[0m \033[92m{criterion}: {test_score}\033[0m; \033[91m(+) Variable: {X[idx]}\033[0m")
else:
sign = "-"
result += [(X_test, test_score, sign, X[idx], idx + 1, score_diff)]
current_step += 1
if print_info:
print(f"\033[1m\033[4mSelected Model\033[0m\033[0m\n")
print(f"\033[1m[Model {model_id}]\033[0m \033[92m{criterion}: {current_score}\033[0m; Variables: {X_current}")
features = [elem[0] for elem in result]
for idx, elem in enumerate(features):
features[idx] = [item.replace('"', '') for item in elem]
importance = [elem[5] if (elem[5]) and elem[5] > 0 else 0 for elem in result]
importance = [100 * elem / sum(importance) for elem in importance]
result = tablesample({"index": [elem[4] for elem in result], "features": features, criterion: [elem[1] for elem in result], "change": [elem[2] for elem in result], "variable": [elem[3] for elem in result], "importance": importance})
estimator.drop()
if not(drop_final_estimator):
estimator.fit(input_relation, X_current, y)
result.best_list_ = X_current
if show:
plot_stepwise_ml([len(elem) for elem in result["features"]], result[criterion], result["variable"], result["change"], [result["features"][0], X_current], x_label="n_features", y_label=criterion, direction=direction, ax=ax, **style_kwds,)
coeff_importances = {}
for idx in range(len(importance)):
if result["variable"][idx] != None:
coeff_importances[result["variable"][idx]] = importance[idx]
plot_importance(coeff_importances, print_legend=False, ax=ax, **style_kwds,)
return result
# ---#
def validation_curve(
estimator,
param_name: str,
param_range: list,
input_relation: Union[str, vDataFrame],
X: list,
y: str,
metric: str = "auto",
cv: int = 3,
pos_label: Union[int, float, str] = None,
cutoff: float = -1,
std_coeff: float = 1,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the validation curve.
Parameters
----------
estimator: object
Vertica estimator with a fit method and a database cursor.
param_name: str
Parameter name.
param_range: list
Parameter Range.
input_relation: str/vDataFrame
Relation to use to train the model.
X: list
List of the predictor columns.
y: str
Response Column.
metric: str, optional
Metric used to do the model evaluation.
auto: logloss for classification & rmse for regression.
For Classification:
accuracy : Accuracy
auc : Area Under the Curve (ROC)
bm : Informedness = tpr + tnr - 1
csi : Critical Success Index = tp / (tp + fn + fp)
f1 : F1 Score
logloss : Log Loss
mcc : Matthews Correlation Coefficient
mk : Markedness = ppv + npv - 1
npv : Negative Predictive Value = tn / (tn + fn)
prc_auc : Area Under the Curve (PRC)
precision : Precision = tp / (tp + fp)
recall : Recall = tp / (tp + fn)
specificity : Specificity = tn / (tn + fp)
For Regression:
max : Max error
mae : Mean absolute error
median : Median absolute error
mse : Mean squared error
msle : Mean squared log error
r2 : R-squared coefficient
r2a : R2 adjusted
rmse : Root-mean-squared error
var : Explained variance
cv: int, optional
Number of folds.
pos_label: int/float/str, optional
The main class to be considered as positive (classification only).
cutoff: float, optional
The model cutoff (classification only).
std_coeff: float, optional
Value of the standard deviation coefficient used to compute the area plot
around each score.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
if not (isinstance(param_range, Iterable)) or isinstance(param_range, str):
param_range = [param_range]
from verticapy.plot import range_curve
gs_result = grid_search_cv(
estimator,
{param_name: param_range},
input_relation,
X,
y,
metric,
cv,
pos_label,
cutoff,
True,
False,
False,
)
gs_result_final = [
(
gs_result["parameters"][i][param_name],
gs_result["avg_score"][i],
gs_result["avg_train_score"][i],
gs_result["score_std"][i],
gs_result["score_train_std"][i],
)
for i in range(len(param_range))
]
gs_result_final.sort(key=lambda tup: tup[0])
X = [elem[0] for elem in gs_result_final]
Y = [
[
[elem[2] - std_coeff * elem[4] for elem in gs_result_final],
[elem[2] for elem in gs_result_final],
[elem[2] + std_coeff * elem[4] for elem in gs_result_final],
],
[
[elem[1] - std_coeff * elem[3] for elem in gs_result_final],
[elem[1] for elem in gs_result_final],
[elem[1] + std_coeff * elem[3] for elem in gs_result_final],
],
]
result = tablesample(
{
param_name: X,
"training_score_lower": Y[0][0],
"training_score": Y[0][1],
"training_score_upper": Y[0][2],
"test_score_lower": Y[1][0],
"test_score": Y[1][1],
"test_score_upper": Y[1][2],
}
)
range_curve(
X, Y, param_name, metric, ax, ["train", "test"], **style_kwds,
)
return result
```
#### File: tests/vModel/test_delphi.py
```python
import pytest, warnings, os, verticapy
from verticapy import set_option
from verticapy.learn.delphi import *
import matplotlib.pyplot as plt
set_option("print_info", False)
set_option("random_state", 0)
@pytest.fixture(scope="module")
def amazon_vd(base):
from verticapy.datasets import load_amazon
amazon = load_amazon(cursor=base.cursor)
yield amazon
with warnings.catch_warnings(record=True) as w:
drop(
name="public.amazon", cursor=base.cursor,
)
@pytest.fixture(scope="module")
def titanic_vd(base):
from verticapy.datasets import load_titanic
titanic = load_titanic(cursor=base.cursor)
yield titanic
with warnings.catch_warnings(record=True) as w:
drop(
name="public.titanic", cursor=base.cursor,
)
@pytest.fixture(scope="module")
def winequality_vd(base):
from verticapy.datasets import load_winequality
winequality = load_winequality(cursor=base.cursor)
yield winequality
with warnings.catch_warnings(record=True) as w:
drop(
name="public.winequality", cursor=base.cursor,
)
class TestDelphi:
def test_AutoML(self, base, titanic_vd):
model = AutoML("AutoML_test_ml", cursor=base.cursor)
model.drop()
model.fit(titanic_vd, y="survived")
assert model.model_grid_["avg_score"][0] < 0.1
assert len(model.plot().get_default_bbox_extra_artists()) < 30
plt.close("all")
assert len(model.plot("stepwise").get_default_bbox_extra_artists()) < 200
plt.close("all")
model.drop()
def test_AutoDataPrep(self, base, titanic_vd, amazon_vd):
model = AutoDataPrep("AutoML_test_dp", cursor=base.cursor)
model.drop()
model.fit(titanic_vd)
assert model.final_relation_.shape() == (1234, 56)
model.drop()
model2 = AutoDataPrep("AutoML_test_dp", cursor=base.cursor, num_method="same_freq")
model2.drop()
model2.fit(titanic_vd)
assert model2.final_relation_.shape() == (1234, 101)
model2.drop()
model3 = AutoDataPrep("AutoML_test_dp", cursor=base.cursor, num_method="same_width", na_method="drop", apply_pca=True)
model3.drop()
model3.fit(titanic_vd)
assert model3.final_relation_.shape() == (112, 122)
model3.drop()
model4 = AutoDataPrep("AutoML_test_dp", cursor=base.cursor)
model4.drop()
model4.fit(amazon_vd)
assert model4.final_relation_.shape() == (6318, 3)
model4.drop()
def test_AutoClustering(self, base, titanic_vd):
model = AutoClustering("AutoML_test_cluster", cursor=base.cursor)
model.drop()
model.fit(titanic_vd,)
assert model.model_.parameters["n_cluster"] < 100
model.drop()
```
#### File: tests/vModel/test_linear_svc.py
```python
import pytest, warnings, math, sys, os, verticapy
from verticapy.learn.svm import LinearSVC
from verticapy import drop, set_option, vertica_conn
import matplotlib.pyplot as plt
set_option("print_info", False)
@pytest.fixture(scope="module")
def titanic_vd(base):
from verticapy.datasets import load_titanic
titanic = load_titanic(cursor=base.cursor)
yield titanic
with warnings.catch_warnings(record=True) as w:
drop(name="public.titanic", cursor=base.cursor)
@pytest.fixture(scope="module")
def winequality_vd(base):
from verticapy.datasets import load_winequality
winequality = load_winequality(cursor=base.cursor)
yield winequality
with warnings.catch_warnings(record=True) as w:
drop(name="public.winequality", cursor=base.cursor)
@pytest.fixture(scope="module")
def model(base, titanic_vd):
base.cursor.execute("DROP MODEL IF EXISTS lsvc_model_test")
model_class = LinearSVC("lsvc_model_test", cursor=base.cursor)
model_class.fit("public.titanic", ["age", "fare"], "survived")
yield model_class
model_class.drop()
class TestLinearSVC:
def test_repr(self, model):
assert "predictor|coefficient" in model.__repr__()
model_repr = LinearSVC("model_repr")
model_repr.drop()
assert model_repr.__repr__() == "<LinearSVC>"
def test_classification_report(self, model):
cls_rep1 = model.classification_report().transpose()
assert cls_rep1["auc"][0] == pytest.approx(0.687468030690537, 1e-2)
assert cls_rep1["prc_auc"][0] == pytest.approx(0.5976470350144453, 1e-2)
assert cls_rep1["accuracy"][0] == pytest.approx(0.6726094003241491, 1e-2)
assert cls_rep1["log_loss"][0] == pytest.approx(0.279724470067258, 1e-2)
assert cls_rep1["precision"][0] == pytest.approx(0.6916666666666667, 1e-2)
assert cls_rep1["recall"][0] == pytest.approx(0.18444444444444444, 1e-2)
assert cls_rep1["f1_score"][0] == pytest.approx(0.29122807017543856, 1e-2)
assert cls_rep1["mcc"][0] == pytest.approx(0.22296937510796555, 1e-2)
assert cls_rep1["informedness"][0] == pytest.approx(0.13725056689342408, 1e-2)
assert cls_rep1["markedness"][0] == pytest.approx(0.36222321962896453, 1e-2)
assert cls_rep1["csi"][0] == pytest.approx(0.1704312114989733, 1e-2)
assert cls_rep1["cutoff"][0] == pytest.approx(0.5, 1e-2)
cls_rep2 = model.classification_report(cutoff=0.2).transpose()
assert cls_rep2["cutoff"][0] == pytest.approx(0.2, 1e-2)
def test_confusion_matrix(self, model):
conf_mat1 = model.confusion_matrix()
assert conf_mat1[0][0] == 747
assert conf_mat1[0][1] == 367
assert conf_mat1[1][0] == 37
assert conf_mat1[1][1] == 83
conf_mat2 = model.confusion_matrix(cutoff=0.2)
assert conf_mat2[0][0] == 179
assert conf_mat2[0][1] == 59
assert conf_mat2[1][0] == 605
assert conf_mat2[1][1] == 391
def test_contour(self, base, titanic_vd):
model_test = LinearSVC("model_contour", cursor=base.cursor)
model_test.drop()
model_test.fit(
titanic_vd,
["age", "fare",],
"survived",
)
result = model_test.contour()
assert len(result.get_default_bbox_extra_artists()) == 34
model_test.drop()
def test_deploySQL(self, model):
expected_sql = "PREDICT_SVM_CLASSIFIER(\"age\", \"fare\" USING PARAMETERS model_name = 'lsvc_model_test', type = 'probability', match_by_pos = 'true')"
result_sql = model.deploySQL()
assert result_sql == expected_sql
def test_drop(self, base):
base.cursor.execute("DROP MODEL IF EXISTS lsvc_model_test_drop")
model_test = LinearSVC("lsvc_model_test_drop", cursor=base.cursor)
model_test.fit("public.titanic", ["age", "fare"], "survived")
base.cursor.execute(
"SELECT model_name FROM models WHERE model_name = 'lsvc_model_test_drop'"
)
assert base.cursor.fetchone()[0] == "lsvc_model_test_drop"
model_test.drop()
base.cursor.execute(
"SELECT model_name FROM models WHERE model_name = 'lsvc_model_test_drop'"
)
assert base.cursor.fetchone() is None
def test_features_importance(self, model):
f_imp = model.features_importance()
assert f_imp["index"] == ["fare", "age"]
assert f_imp["importance"] == [85.09, 14.91]
assert f_imp["sign"] == [1, -1]
plt.close("all")
def test_lift_chart(self, model):
lift_ch = model.lift_chart(nbins=1000)
assert lift_ch["decision_boundary"][10] == pytest.approx(0.01)
assert lift_ch["positive_prediction_ratio"][10] == pytest.approx(0.0)
assert lift_ch["decision_boundary"][900] == pytest.approx(0.9)
assert lift_ch["positive_prediction_ratio"][900] == pytest.approx(1.0)
assert lift_ch["lift"][900] == pytest.approx(1.0)
plt.close("all")
def test_get_plot(self, base, winequality_vd):
base.cursor.execute("DROP MODEL IF EXISTS model_test_plot")
model_test = LinearSVC("model_test_plot", cursor=base.cursor)
model_test.fit(winequality_vd, ["alcohol"], "good")
result = model_test.plot(color="r")
assert len(result.get_default_bbox_extra_artists()) == 11
plt.close("all")
model_test.drop()
model_test.fit(winequality_vd, ["alcohol", "residual_sugar"], "good")
result = model_test.plot(color="r")
assert len(result.get_default_bbox_extra_artists()) == 11
plt.close("all")
model_test.drop()
model_test.fit(winequality_vd, ["alcohol", "residual_sugar", "fixed_acidity"], "good")
result = model_test.plot(color="r")
assert len(result.get_default_bbox_extra_artists()) == 5
plt.close("all")
model_test.drop()
def test_to_sklearn(self, model):
md = model.to_sklearn()
model.cursor.execute(
"SELECT PREDICT_SVM_CLASSIFIER(11.0, 1993. USING PARAMETERS model_name = '{}', match_by_pos=True)".format(
model.name
)
)
prediction = model.cursor.fetchone()[0]
assert prediction == pytest.approx(md.predict([[11.0, 1993.0]])[0])
def test_to_python(self, model):
model.cursor.execute(
"SELECT PREDICT_SVM_CLASSIFIER(3.0, 11.0 USING PARAMETERS model_name = '{}', match_by_pos=True)".format(
model.name
)
)
prediction = model.cursor.fetchone()[0]
assert prediction == pytest.approx(model.to_python(return_str=False)([[3.0, 11.0,]])[0])
model.cursor.execute(
"SELECT PREDICT_SVM_CLASSIFIER(3.0, 11.0 USING PARAMETERS model_name = '{}', type='probability', class=1, match_by_pos=True)".format(
model.name
)
)
prediction = model.cursor.fetchone()[0]
assert prediction == pytest.approx(model.to_python(return_proba=True, return_str=False)([[3.0, 11.0,]])[0][1])
def test_to_memmodel(self, model, titanic_vd):
mmodel = model.to_memmodel()
res = mmodel.predict([[3.0, 11.0,],
[11.0, 1.0,]])
res_py = model.to_python()([[3.0, 11.0,],
[11.0, 1.0,]])
assert res[0] == res_py[0]
assert res[1] == res_py[1]
res = mmodel.predict_proba([[3.0, 11.0,],
[11.0, 1.0,]])
res_py = model.to_python(return_proba = True)([[3.0, 11.0,],
[11.0, 1.0,]])
assert res[0][0] == res_py[0][0]
assert res[0][1] == res_py[0][1]
assert res[1][0] == res_py[1][0]
assert res[1][1] == res_py[1][1]
vdf = titanic_vd.copy()
vdf["prediction_sql"] = mmodel.predict_sql(["age", "fare"])
vdf["prediction_proba_sql_0"] = mmodel.predict_proba_sql(["age", "fare"])[0]
vdf["prediction_proba_sql_1"] = mmodel.predict_proba_sql(["age", "fare"])[1]
model.predict(vdf, name = "prediction_vertica_sql", cutoff = 0.5)
model.predict(vdf, name = "prediction_proba_vertica_sql_1")
vdf["prediction_proba_vertica_sql_0"] = 1 - vdf["prediction_proba_vertica_sql_1"]
score = vdf.score("prediction_sql", "prediction_vertica_sql", "accuracy")
assert score == pytest.approx(1.0)
score = vdf.score("prediction_proba_sql_0", "prediction_proba_vertica_sql_0", "r2")
assert score == pytest.approx(1.0)
score = vdf.score("prediction_proba_sql_1", "prediction_proba_vertica_sql_1", "r2")
assert score == pytest.approx(1.0)
def test_to_sql(self, model):
model.cursor.execute(
"SELECT PREDICT_SVM_CLASSIFIER(3.0, 11.0 USING PARAMETERS model_name = '{}', match_by_pos=True), {}".format(
model.name, model.to_sql([3.0, 11.0])
)
)
prediction = model.cursor.fetchone()
assert prediction[0] == pytest.approx(prediction[1])
@pytest.mark.skip(reason="shap doesn't want to get installed.")
def test_shapExplainer(self, model):
explainer = model.shapExplainer()
assert explainer.expected_value[0] == pytest.approx(-0.22667938806360247)
def test_get_attr(self, model):
attr = model.get_attr()
assert attr["attr_name"] == [
"details",
"accepted_row_count",
"rejected_row_count",
"iteration_count",
"call_string",
]
assert attr["attr_fields"] == [
"predictor, coefficient",
"accepted_row_count",
"rejected_row_count",
"iteration_count",
"call_string",
]
assert attr["#_of_rows"] == [3, 1, 1, 1, 1]
details = model.get_attr("details")
assert details["predictor"] == ["Intercept", "age", "fare"]
assert details["coefficient"][0] == pytest.approx(-0.226679636751873)
assert details["coefficient"][1] == pytest.approx(-0.00661256493751514)
assert details["coefficient"][2] == pytest.approx(0.00587052591948468)
assert model.get_attr("accepted_row_count")["accepted_row_count"][0] == 996
assert model.get_attr("rejected_row_count")["rejected_row_count"][0] == 238
assert model.get_attr("iteration_count")["iteration_count"][0] == 6
assert (
model.get_attr("call_string")["call_string"][0]
== "SELECT svm_classifier('public.lsvc_model_test', 'public.titanic', '\"survived\"', '\"age\", \"fare\"'\nUSING PARAMETERS class_weights='1,1', C=1, max_iterations=100, intercept_mode='regularized', intercept_scaling=1, epsilon=0.0001);"
)
def test_get_params(self, model):
params = model.get_params()
assert params == {
"tol": 0.0001,
"C": 1.0,
"max_iter": 100,
"fit_intercept": True,
"intercept_scaling": 1.0,
"intercept_mode": "regularized",
"class_weight": [1, 1],
"penalty": "l2",
}
def test_prc_curve(self, model):
prc = model.prc_curve(nbins=1000)
assert prc["threshold"][10] == pytest.approx(0.009)
assert prc["recall"][10] == pytest.approx(1.0)
assert prc["precision"][10] == pytest.approx(0.392570281124498)
assert prc["threshold"][900] == pytest.approx(0.899)
assert prc["recall"][900] == pytest.approx(0.010230179028133)
assert prc["precision"][900] == pytest.approx(1.0)
plt.close("all")
def test_predict(self, titanic_vd, model):
titanic_copy = titanic_vd.copy()
model.predict(titanic_copy, name="pred_probability")
assert titanic_copy["pred_probability"].min() == pytest.approx(0.33841486903496)
model.predict(titanic_copy, name="pred_class1", cutoff=0.7)
assert titanic_copy["pred_class1"].sum() == 23.0
model.predict(titanic_copy, name="pred_class2", cutoff=0.3)
assert titanic_copy["pred_class2"].sum() == 996.0
def test_roc_curve(self, model):
roc = model.roc_curve(nbins=1000)
assert roc["threshold"][100] == pytest.approx(0.1)
assert roc["false_positive"][100] == pytest.approx(1.0)
assert roc["true_positive"][100] == pytest.approx(1.0)
assert roc["threshold"][700] == pytest.approx(0.7)
assert roc["false_positive"][700] == pytest.approx(0.00661157024793388)
assert roc["true_positive"][700] == pytest.approx(0.0485933503836317)
plt.close("all")
def test_cutoff_curve(self, model):
cutoff_curve = model.cutoff_curve(nbins=1000)
assert cutoff_curve["threshold"][100] == pytest.approx(0.1)
assert cutoff_curve["false_positive"][100] == pytest.approx(1.0)
assert cutoff_curve["true_positive"][100] == pytest.approx(1.0)
assert cutoff_curve["threshold"][700] == pytest.approx(0.7)
assert cutoff_curve["false_positive"][700] == pytest.approx(0.00661157024793388)
assert cutoff_curve["true_positive"][700] == pytest.approx(0.0485933503836317)
plt.close("all")
def test_score(self, model):
assert model.score(cutoff=0.7, method="accuracy") == pytest.approx(
0.6474878444084279
)
assert model.score(cutoff=0.3, method="accuracy") == pytest.approx(
0.4619124797406807
)
assert model.score(cutoff=0.7, method="auc") == pytest.approx(
0.6933968844454788
)
assert model.score(cutoff=0.3, method="auc") == pytest.approx(
0.6933968844454788
)
assert model.score(cutoff=0.7, method="best_cutoff") == pytest.approx(0.431)
assert model.score(cutoff=0.3, method="best_cutoff") == pytest.approx(0.431)
assert model.score(cutoff=0.7, method="bm") == pytest.approx(
0.03712018140589568
)
assert model.score(cutoff=0.3, method="bm") == pytest.approx(
0.09720521541950111
)
assert model.score(cutoff=0.7, method="csi") == pytest.approx(
0.04185022026431718
)
assert model.score(cutoff=0.3, method="csi") == pytest.approx(
0.3706161137440758
)
assert model.score(cutoff=0.7, method="f1") == pytest.approx(0.080338266384778)
assert model.score(cutoff=0.3, method="f1") == pytest.approx(0.5408022130013832)
assert model.score(cutoff=0.7, method="logloss") == pytest.approx(
0.279724470067258
)
assert model.score(cutoff=0.3, method="logloss") == pytest.approx(
0.279724470067258
)
assert model.score(cutoff=0.7, method="mcc") == pytest.approx(
0.13211082012086103
)
assert model.score(cutoff=0.3, method="mcc") == pytest.approx(
0.11858662456854734
)
assert model.score(cutoff=0.7, method="mk") == pytest.approx(0.4701827451261984)
assert model.score(cutoff=0.3, method="mk") == pytest.approx(
0.14467112146063243
)
assert model.score(cutoff=0.7, method="npv") == pytest.approx(
0.8260869565217391
)
assert model.score(cutoff=0.3, method="npv") == pytest.approx(0.392570281124498)
assert model.score(cutoff=0.7, method="prc_auc") == pytest.approx(
0.5976470350144453
)
assert model.score(cutoff=0.3, method="prc_auc") == pytest.approx(
0.5976470350144453
)
assert model.score(cutoff=0.7, method="precision") == pytest.approx(
0.8260869565217391
)
assert model.score(cutoff=0.3, method="precision") == pytest.approx(
0.392570281124498
)
assert model.score(cutoff=0.7, method="specificity") == pytest.approx(
0.9948979591836735
)
assert model.score(cutoff=0.3, method="specificity") == pytest.approx(
0.22831632653061223
)
def test_set_cursor(self, model):
cur = vertica_conn(
"vp_test_config",
os.path.dirname(verticapy.__file__) + "/tests/verticaPy_test_tmp.conf",
).cursor()
model.set_cursor(cur)
model.cursor.execute("SELECT 1;")
result = model.cursor.fetchone()
assert result[0] == 1
def test_set_params(self, model):
model.set_params({"max_iter": 1000})
assert model.get_params()["max_iter"] == 1000
def test_model_from_vDF(self, base, titanic_vd):
base.cursor.execute("DROP MODEL IF EXISTS lsvc_from_vDF")
model_test = LinearSVC("lsvc_from_vDF", cursor=base.cursor)
model_test.fit(titanic_vd, ["age", "fare"], "survived")
base.cursor.execute(
"SELECT model_name FROM models WHERE model_name = 'lsvc_from_vDF'"
)
assert base.cursor.fetchone()[0] == "lsvc_from_vDF"
model_test.drop()
```
#### File: tests/vModel/test_sarimax.py
```python
import pytest, warnings, sys, os, verticapy
from verticapy.learn.tsa import SARIMAX
from verticapy import drop, set_option, vertica_conn, create_verticapy_schema
import matplotlib.pyplot as plt
set_option("print_info", False)
@pytest.fixture(scope="module")
def amazon_vd(base):
from verticapy.datasets import load_amazon
amazon = load_amazon(cursor=base.cursor)
yield amazon
with warnings.catch_warnings(record=True) as w:
drop(name="public.amazon", cursor=base.cursor)
@pytest.fixture(scope="module")
def model(base, amazon_vd):
create_verticapy_schema(base.cursor)
model_class = SARIMAX("sarimax_model_test", cursor=base.cursor, p=1, d=1, q=1, s=12, P=1, D=1, Q=1, max_pik=20)
model_class.drop()
model_class.fit("public.amazon", "number", "date",)
yield model_class
model_class.drop()
class TestSARIMAX:
def test_repr(self, model):
assert "Additional Info" in model.__repr__()
model_repr = SARIMAX("sarimax_repr", cursor=model.cursor)
model_repr.drop()
assert model_repr.__repr__() == "<SARIMAX>"
def test_deploySQL(self, model):
assert 'VerticaPy_y_copy' in model.deploySQL()
def test_drop(self, base):
model_test = SARIMAX("sarimax_model_test_drop", cursor=base.cursor)
model_test.drop()
model_test.fit("public.amazon", "number", "date",)
base.cursor.execute(
"SELECT model_name FROM verticapy.models WHERE model_name IN ('sarimax_model_test_drop', '\"sarimax_model_test_drop\"')"
)
assert base.cursor.fetchone()[0] in ("sarimax_model_test_drop", '"sarimax_model_test_drop"')
model_test.drop()
base.cursor.execute(
"SELECT model_name FROM verticapy.models WHERE model_name IN ('sarimax_model_test_drop', '\"sarimax_model_test_drop\"')"
)
assert base.cursor.fetchone() is None
def test_get_attr(self, model):
m_att = model.get_attr()
assert m_att["attr_name"] == [
"coefficients",
"ma_avg",
"ma_piq",
]
m_att_details = model.get_attr(attr_name="coefficients")
assert m_att_details["predictor"] == [
"Intercept",
"ar1",
"ar12",
"ma1",
"ma12",
]
assert m_att_details["coefficient"][0] == pytest.approx(-0.0206811318986692, abs=1e-6)
assert m_att_details["coefficient"][1] == pytest.approx(-0.472445862105583, abs=1e-6)
assert m_att_details["coefficient"][2] == pytest.approx(-0.283486934349855, abs=1e-6)
assert m_att_details["coefficient"][3] == pytest.approx(-0.289912044494682, abs=1e-6)
assert m_att_details["coefficient"][4] == pytest.approx(-0.5845016482145707, abs=1e-6)
assert model.get_attr(attr_name="ma_avg") == pytest.approx(-0.271509332267827, abs=1e-6)
assert model.get_attr(attr_name="ma_piq")["coefficient"][0:2] == [pytest.approx(-1, abs=1e-6), pytest.approx(0.289912044494682, abs=1e-6)]
def test_get_params(self, model):
assert model.get_params() == {'D': 1,
'P': 1,
'Q': 1,
'd': 1,
'max_iter': 1000,
'max_pik': 20,
'p': 1,
'papprox_ma': 200,
'q': 1,
's': 12,
'solver': 'Newton',
'tol': 0.0001}
def test_get_plot(self, model,):
result = model.plot(color="r", nlead=10, nlast=10, dynamic=True,)
assert len(result.get_default_bbox_extra_artists()) == 18
plt.close("all")
def test_get_predicts(self, amazon_vd, model):
result = model.predict(
amazon_vd,
name="predict",
nlead=10,
)
assert result["predict"].avg() == pytest.approx(
140.036629403195, abs=1e-6
)
def test_regression_report(self, model):
reg_rep = model.regression_report()
assert reg_rep["index"] == [
"explained_variance",
"max_error",
"median_absolute_error",
"mean_absolute_error",
"mean_squared_error",
"root_mean_squared_error",
"r2",
"r2_adj",
"aic",
"bic",
]
assert reg_rep["value"][0] == pytest.approx(-0.755650497621303, abs=1e-6)
assert float(reg_rep["value"][1]) == pytest.approx(28044.894016038184, abs=1e-6)
assert reg_rep["value"][2] == pytest.approx(337.270572384534, abs=1e-6)
assert reg_rep["value"][3] == pytest.approx(1089.45677026604, abs=1e-6)
assert reg_rep["value"][4] == pytest.approx(4992560.88725707, abs=1e-6)
assert reg_rep["value"][5] == pytest.approx(2234.403922136074, abs=1e-6)
assert reg_rep["value"][6] == pytest.approx(-0.823855565723365, abs=1e-6)
assert reg_rep["value"][7] == pytest.approx(-0.8249988696354311, abs=1e-6)
assert reg_rep["value"][8] == pytest.approx(98504.22202646323, abs=1e-6)
assert reg_rep["value"][9] == pytest.approx(98538.0219389409, abs=1e-6)
def test_score(self, model):
# method = "max"
assert model.score(method="max") == pytest.approx(28044.894016038183, abs=1e-6)
# method = "mae"
assert model.score(method="mae") == pytest.approx(1089.45677026604, abs=1e-6)
# method = "median"
assert model.score(method="median") == pytest.approx(337.270572384534, abs=1e-6)
# method = "mse"
assert model.score(method="mse") == pytest.approx(4992560.88725705, abs=1e-6)
# method = "rmse"
assert model.score(method="rmse") == pytest.approx(2234.4039221360695, abs=1e-6)
# method = "r2"
assert model.score() == pytest.approx(-0.823855565723365, abs=1e-6)
# method = "r2a"
assert model.score(method="r2a") == pytest.approx(-0.8249988696354311, abs=1e-6)
# method = "var"
assert model.score(method="var") == pytest.approx(-0.755650497621303, abs=1e-6)
# method = "aic"
assert model.score(method="aic") == pytest.approx(98504.22202646323, abs=1e-6)
# method = "bic"
assert model.score(method="bic") == pytest.approx(98538.0219389409, abs=1e-6)
def test_set_cursor(self, model):
cur = vertica_conn(
"vp_test_config",
os.path.dirname(verticapy.__file__) + "/tests/verticaPy_test_tmp.conf",
).cursor()
model.set_cursor(cur)
model.cursor.execute("SELECT 1;")
result = model.cursor.fetchone()
assert result[0] == 1
def test_set_params(self, model):
model.set_params({"p": 2})
assert model.get_params()["p"] == 2
def test_model_from_vDF(self, base, amazon_vd):
model_class = SARIMAX("sarimax_model_test_vdf", cursor=base.cursor, p=1, d=1, q=1, s=12, P=1, D=1, Q=1, max_pik=20)
model_class.drop()
model_class.fit(amazon_vd, "number", "date",)
base.cursor.execute(
"SELECT model_name FROM verticapy.models WHERE model_name IN ('sarimax_model_test_vdf', '\"sarimax_model_test_vdf\"')"
)
assert base.cursor.fetchone()[0] in ("sarimax_model_test_vdf", '"sarimax_model_test_vdf"')
model_class.drop()
``` |
{
"source": "jmiszczak/matthew_reduction_game",
"score": 3
} |
#### File: src/scripts/boltzmann-gibbs.py
```python
import scipy.stats as sps
import numpy as np
import pandas as pd
#%% local functions
script_path = ""
import os
try:
script_path = os.path.dirname(__file__)
os.chdir(script_path)
except FileNotFoundError:
script_path = os.getcwd()
else:
script_path = os.getcwd()
import sys
sys.path.append("..")
def gini_index(vec):
agent_wealths = sorted(vec)
N = len(vec)
#return sum([(2*(i+1)-N-1)*x for i,x in enumerate(agent_wealths) ])/(N*sum(agent_wealths))
return sum([abs(xi-xj) for xi in agent_wealths for xj in agent_wealths ]) /(2*N*sum(agent_wealths))
def hoover_index(vec):
agent_wealths = sorted(vec)
N = len(vec)
mean_wealth = sum(agent_wealths)/N
return sum([abs(xi-mean_wealth) for xi in agent_wealths])/(2*sum(agent_wealths))
def theil_index(vec):
agent_wealths = sorted(vec)
N = len(vec)
mean_wealth = sum(agent_wealths)/N
return 1/N*sum([(xi/mean_wealth)*np.log(xi/mean_wealth) for xi in filter(lambda x:x>0,agent_wealths)])
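# Illustrative sanity check (an added example, not part of the original script):
# a uniform wealth vector yields zero inequality under all three indices.
_uniform = [5, 5, 5, 5]
assert gini_index(_uniform) == 0.0
assert hoover_index(_uniform) == 0.0
assert theil_index(_uniform) == 0.0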
#%% derivation of the shape parameters
# lambda_ - inverse temperature
# N - {0,1,...,N} - support of the distribution
# num_agents - number of agents;
# each agent has m_i = initial_capital + i money, i = 1, 2, ..., num_agents
#%% main loop
ineq_index_data_bg = pd.DataFrame([],columns=['num_agents',
'min_gini', 'max_gini', 'median_gini', 'mean_gini',
'min_hoover', 'max_hoover', 'median_hoover', 'mean_hoover',
'min_theil', 'max_theil', 'median_theil', 'mean_theil'])
for num_agents in range(20, 201, 20):
initial_capital = 20
sum_money = (1+num_agents)*(2*initial_capital+num_agents)//2
lambda_, N = num_agents/sum_money, sum_money
print("agents: "+str(num_agents))
print("money: "+str(sum_money))
sample = [ sps.boltzmann.rvs(lambda_, N, size=num_agents) for _ in range(10000)]
sample_gini = list(map(gini_index,sample))
sample_hoover = list(map(hoover_index,sample))
sample_theil = list(map(theil_index,sample))
tmp_df = pd.DataFrame(
        [(num_agents, min(sample_gini), max(sample_gini), np.median(sample_gini), np.mean(sample_gini),
          min(sample_hoover), max(sample_hoover), np.median(sample_hoover), np.mean(sample_hoover),
          min(sample_theil), max(sample_theil), np.median(sample_theil), np.mean(sample_theil))],
columns=['num_agents','min_gini','max_gini', 'median_gini', 'mean_gini',
'min_hoover', 'max_hoover', 'median_hoover', 'mean_hoover',
'min_theil', 'max_theil', 'median_theil', 'mean_theil'])
    ineq_index_data_bg = pd.concat([ineq_index_data_bg, tmp_df], ignore_index=True)
#%% data saving
ineq_index_data_bg.to_csv("data/ineq_index_values-boltzmann-gibbs.zip", index=False, compression=dict(method='zip', archive_name='data.csv'))
```
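The sampling step above relies on scipy's truncated Boltzmann distribution. As a hedged sketch of its parameterization (assuming `scipy.stats.boltzmann`'s documented shape parameters), the pmf is proportional to `exp(-lambda_ * k)` on the truncated support `k = 0, ..., N-1`:
```python
# Sketch of the distribution sampled in boltzmann-gibbs.py; assumes the
# standard scipy.stats.boltzmann parameterization and mirrors the script's
# shape-parameter derivation for num_agents = 20.
import numpy as np
import scipy.stats as sps

num_agents, initial_capital = 20, 20
sum_money = (1 + num_agents) * (2 * initial_capital + num_agents) // 2  # 630
lambda_, N = num_agents / sum_money, sum_money

dist = sps.boltzmann(lambda_, N)
assert np.isclose(dist.pmf(np.arange(N)).sum(), 1.0)  # support is k = 0, ..., N-1
print(dist.mean(), sum_money / num_agents)  # ~31.0 vs. the 31.5 average per agent
```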
#### File: src/scripts/gini_plot.py
```python
import os
script_path = ""
try:
script_path = os.path.dirname(__file__)
os.chdir(script_path)
except FileNotFoundError:
script_path = os.getcwd()
else:
script_path = os.getcwd()
import sys
sys.path.append("..")
from IPython.core.display import display
import matplotlib as mpl
mpl.rc('text', usetex = True)
mpl.rc('font', size = 10)
import numpy as np
import pandas as pd
#%% plot of the Gini and Hoover index for the constant difference initialization
def gini_index_const_calc(init_wealth,num_agents):
agents_capital = [init_wealth+i for i in range(num_agents)]
return sum([abs(xi-xj) for xi in agents_capital for xj in agents_capital ]) /(2*num_agents*sum(agents_capital))
def hoover_index_const_calc(init_wealth,num_agents):
agents_capital = [init_wealth+i for i in range(num_agents)]
mean_capital = (1/num_agents)*sum(agents_capital)
return (0.5/sum(agents_capital))*sum([abs(xi - mean_capital) for xi in agents_capital])
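# Illustrative check (added for exposition, not in the original script): for the
# arithmetic profile w_i = c + i, i = 0..N-1, the Gini index has the closed form
# (N**2 - 1) / (3 * N * (2*c + N - 1)); the brute-force double sum above matches it.
assert abs(gini_index_const_calc(20, 20) - (20 ** 2 - 1) / (3 * 20 * (2 * 20 + 20 - 1))) < 1e-12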
# data for selected initial_capital
initial_capital = 20
gini_data_const = [gini_index_const_calc(initial_capital,n) for n in range(20,201,20)]
hoover_data_const = [hoover_index_const_calc(initial_capital,n) for n in range(20,201,20)]
np.savetxt(script_path + "/data/gini_index_values-constant.dat",gini_data_const)
# %% Gini, Hoover and Theil
fig = mpl.figure.Figure(figsize=(3.25,2.4375))
axs = fig.add_subplot()
# axs.set_xlim((15,145))
axs.set_xticks(range(20,206,20))
axs.grid(alpha=0.75,ls=':')
axs.set_ylim((0.1,0.8))
axs.set_yticks(np.arange(0.0,0.81,0.1))
axs.set_ylabel('Inequality index')
axs.set_xlabel('Number of agents')
axs.plot(list(np.arange(20,202,20)),gini_data_const,'b.--',label="Gini",markersize='12',fillstyle='none')
axs.plot(list(np.arange(20,202,20)),hoover_data_const,'g+:', label="Hoover")
# axs.plot(list(np.arange(20,142,20)),theil_data_conts[20:141:20],'rx-.', label="Theil")
fig.legend(loc='upper center',ncol=1,bbox_to_anchor=(0.7,0.85))
display(fig)
fig.tight_layout()
fig.savefig("plots/indices_values-constant.pdf")
# # %% Gini index only
# fig = mpl.figure.Figure(figsize=(3.25,2.4375))
# axs = fig.add_subplot()
# axs.set_xlim((15,145))
# axs.set_xticks(range(25,146,20))
# axs.grid(alpha=0.75,ls=':')
# axs.set_ylim((0.1,0.8))
# axs.set_yticks(np.arange(0.1,0.81,0.1))
# axs.set_ylabel('Inequality index')
# axs.set_xlabel('Number of agents')
# axs.plot(list(np.arange(20,142,10)),gini_data_const,'x:')
# display(fig)
# fig.tight_layout()
# fig.savefig("../plots/gini_index_values-constant.pdf")
#%% plot of the Gini index for the Boltzmann-Gibbs initialization
# gini_data_bg = pd.read_csv(script_path + "data/gini_index_values-boltzmann-gibbs.zip")
ineq_data_bg = pd.read_csv(script_path + "/data/ineq_index_values-boltzmann-gibbs.zip")
#%%
fig = mpl.figure.Figure(figsize=(3.25,2.4375))
axs = fig.add_subplot()
# axs.set_xlim((15,145))
axs.set_xticks(range(20,206,20))
axs.set_yticks(np.arange(0.0,0.81,0.1))
axs.grid(alpha=0.75,ls=':')
axs.set_ylim((0.1,0.8))
# axs.set_ylabel('Gini index')
axs.set_xlabel('Number of agents')
# axs.plot(gini_data_bg['num_agents'], gini_data_bg['min'],'bx:', label="min")
# axs.plot(gini_data_bg['num_agents'], gini_data_bg['max'],'r+:', label="max")
axs.plot(ineq_data_bg['num_agents'], ineq_data_bg['mean_gini'],'b.--', label="Gini",markersize='12',fillstyle='none')
axs.fill_between(ineq_data_bg['num_agents'],ineq_data_bg['min_gini'], ineq_data_bg['max_gini'],alpha=0.3, color='darkblue',linestyle='--')
axs.plot(ineq_data_bg['num_agents'], ineq_data_bg['mean_hoover'],'g+:', label="Hoover",color='darkgreen')
axs.fill_between(ineq_data_bg['num_agents'],ineq_data_bg['min_hoover'], ineq_data_bg['max_hoover'],alpha=0.3, color='green',linestyle=':')
#
# axs.plot(ineq_data_bg['num_agents'], ineq_data_bg['mean_theil'],'rx-', label="Theil")
# axs.fill_between(ineq_data_bg['num_agents'],ineq_data_bg['min_theil'], ineq_data_bg['max_theil'],alpha=0.25,color='red',capstyle='round')
# axs.legend(ncol=3,loc='lower right', fontsize=10, labelspacing=0, columnspacing=1)
display(fig)
fig.tight_layout()
fig.savefig("plots/ineq_values-boltzman-gibbs.pdf")
#%% plot of the Gini index for both initializations
# fig = mpl.figure.Figure(figsize=(3.25,2.4375))
# axs = fig.add_subplot()
# axs.set_xlim((15,145))
# axs.grid()
# axs.set_ylim((0.1,0.8))
# axs.set_ylabel('Gini index')
# axs.set_xlabel('Number of agents')
# axs.plot(gini_data_bg['num_agents'], gini_data_bg['min'],'bx--', label="B-G (min)")
# axs.plot(gini_data_bg['num_agents'], gini_data_bg['max'],'r+-.', label="B-G (max)")
# axs.plot(gini_data_bg['num_agents'], gini_data_bg['mean'],'g.:', label="B-G (mean)")
# # axs.plot(gini_data_bg['num_agents'], gini_data_bg['mean'],'k.:', label="mean")
# axs.plot(list(np.arange(20,142,10)),gini_data_conts[20:141:10],'k:', label="const.", linewidth=2)
# axs.legend(ncol=2,loc='best', fontsize=10, labelspacing=0, columnspacing=1)
# display(fig)
# fig.tight_layout()
# fig.savefig("../plots/gini_values.pdf")
``` |
{
"source": "jmiszczak/nisq-qprog-tutorial",
"score": 3
} |
#### File: nisq-qprog-tutorial/qaoa-pennylane/example2.py
```python
import pennylane as qml
t = 1
n = 2
dev = qml.device('default.qubit', wires=n)
def circ(theta):
qml.RX(theta, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0,1])
# @qml.qnode(dev)
# def circuit(param):
# circ(param)
# return [qml.expval(qml.PauliZ(i)) for i in range(n)]
#
# print("")
# circuit(0.5)
# print(circuit.draw())
# this is a quantum circuit function
# it must return a measurement
@qml.qnode(dev)
def circuit(params, **kwargs):
qml.layer(circ, 3, params)
return [qml.expval(qml.PauliZ(i)) for i in range(n)]
print("")
circuit([0.3, 0.4, 0.5])
print(circuit.draw())
``` |
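For clarity, `qml.layer(circ, 3, params)` in the snippet above simply applies the single-parameter template once per entry of `params`. A hedged sketch of the hand-unrolled equivalent, based on PennyLane's documented layering semantics:
```python
# Hand-unrolled equivalent of qml.layer(circ, 3, params) from example2.py
# (a sketch; relies on qml.layer calling the template once per parameter).
import pennylane as qml

n = 2
dev = qml.device('default.qubit', wires=n)

@qml.qnode(dev)
def unrolled(params):
    for theta in params:  # circ(theta) repeated, one layer per parameter
        qml.RX(theta, wires=0)
        qml.Hadamard(wires=1)
        qml.CNOT(wires=[0, 1])
    return [qml.expval(qml.PauliZ(i)) for i in range(n)]

unrolled([0.3, 0.4, 0.5])  # same gates as the layered circuit above
```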
{
"source": "jmitchel3/django-hackref",
"score": 2
} |
#### File: django-hackref/hackref/admin.py
```python
import datetime
from django.contrib import admin
from django.utils import timezone
now = timezone.now()
today_start = datetime.datetime.combine(now, datetime.time.min)
today_end = datetime.datetime.combine(now, datetime.time.max)
# Register your models here.
from .models import ReferralLink, ReferredUser, ReferralCodeClick
admin.site.register(ReferralCodeClick)
class ReferralLinkAdmin(admin.ModelAdmin):
readonly_fields = ["code", "referred_users", "todays_users", "clicks"]
class Meta:
model = ReferralLink
def referred_users(self, obj):
html = obj.get_referred_as_ul()
return html
referred_users.allow_tags = True
def todays_users(self, obj):
users = obj.get_referred_by_date(today_start, today_end)
html = obj.get_referred_as_ul(users=users)
return html
todays_users.allow_tags = True
def clicks(self, obj):
return obj.count
admin.site.register(ReferralLink, ReferralLinkAdmin)
admin.site.register(ReferredUser)
```
#### File: django-hackref/hackref/views.py
```python
import re
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, NoReverseMatch
from django.views.generic import RedirectView, View
from django.shortcuts import render, get_object_or_404
# Create your views here.
from .models import ReferredUser, ReferralLink
from .utils import get_redirect_path, clean_message
CODE_DNE_REDIRECT = getattr(settings, "HACKREF_CODE_DOES_NOT_EXIST_REDIRECT", "/")  # referral code does not exist
CODE_DNE_MESSAGE = getattr(settings, "HACKREF_CODE_DOES_NOT_EXIST_MESSAGE", "This code does not exist")
CODE_DNE_DISPLAY = getattr(settings, "HACKREF_CODE_DOES_NOT_EXIST_MESSAGE_DISPLAY", False)
USER_EXISTS_REDIRECT = getattr(settings, "HACKREF_USER_EXISTS_REDIRECT", "/" )
USER_EXISTS_MESSASE = getattr(settings, "HACKREF_USER_EXISTS_MESSASE", "You are already logged in.")
USER_EXISTS_MESSASE_DISPLAY = getattr(settings,"HACKREF_USER_EXISTS_MESSASE_DISPLAY", False)
REDIRECT_SUCCESS_NAME = getattr(settings, "HACKREF_REDIRECT_SUCCESS_NAME", "account_signup")
REDIRECT_SUCCESS_MESSSAGE = getattr(settings, "HACKREF_REDIRECT_SUCCESS_MESSSAGE", "Referral Counted")
REDIRECT_SUCCESS_MESSSAGE_DISPLAY = getattr(settings, "HACKREF_REDIRECT_SUCCESS_MESSSAGE_DISPLAY", False)
COMPLETED_MESSAGE = getattr(settings, "HACKREF_COMPLETED_MESSAGE", "Referral Completed")
COMPLETED_MESSAGE_DISPLAY = getattr(settings, "HACKREF_COMPLETED_MESSAGE_DISPLAY", False)
class CodeTrackingView(RedirectView):
permanent = False
query_string = True
pattern_name = 'ref-code-redirect'
def get_redirect_url(self, *args, **kwargs):
user = self.request.user
if user.is_authenticated():
"""
Error: User exists. Ignore Referral
"""
if USER_EXISTS_MESSASE_DISPLAY == True:
messages.error(self.request, clean_message("USER_EXISTS_MESSASE", USER_EXISTS_MESSASE))
return get_redirect_path("USER_EXISTS_REDIRECT", USER_EXISTS_REDIRECT)
code = self.kwargs.get("code")
link_qs = ReferralLink.objects.filter(code=code)
if not code or link_qs.count() != 1:
"""
Error: Referral code does not exists. Do Redirect
"""
if CODE_DNE_DISPLAY == True:
messages.error(self.request, clean_message("CODE_DNE_MESSAGE", CODE_DNE_MESSAGE))
return get_redirect_path("CODE_DNE_REDIRECT", CODE_DNE_REDIRECT)
if link_qs.count() == 1:
"""
Successs: Referral Code Exists.
"""
link_obj = link_qs.first()
self.request.session['referral_link_id'] = link_obj.id
link_obj.add_one(user)
if REDIRECT_SUCCESS_MESSSAGE_DISPLAY == True:
messages.success(self.request, clean_message("REDIRECT_SUCCESS_MESSSAGE", REDIRECT_SUCCESS_MESSSAGE))
return get_redirect_path("REDIRECT_SUCCESS_NAME", REDIRECT_SUCCESS_NAME)
``` |
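All behaviour of `CodeTrackingView` is driven by optional Django settings with the defaults shown above. A minimal `settings.py` sketch (values illustrative; the misspelled key names `MESSASE`/`MESSSAGE` match the identifiers in the code and must be kept as-is):
```python
# settings.py -- illustrative overrides for the HACKREF_* settings read above.
# Every key is optional; omitted keys fall back to the defaults in views.py.
HACKREF_CODE_DOES_NOT_EXIST_REDIRECT = "/"
HACKREF_CODE_DOES_NOT_EXIST_MESSAGE = "This code does not exist"
HACKREF_CODE_DOES_NOT_EXIST_MESSAGE_DISPLAY = True
HACKREF_USER_EXISTS_REDIRECT = "/"
HACKREF_USER_EXISTS_MESSASE = "You are already logged in."  # spelling matches the code
HACKREF_USER_EXISTS_MESSASE_DISPLAY = True
HACKREF_REDIRECT_SUCCESS_NAME = "account_signup"  # URL pattern name used on success
HACKREF_REDIRECT_SUCCESS_MESSSAGE = "Referral Counted"
HACKREF_REDIRECT_SUCCESS_MESSSAGE_DISPLAY = True
```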
{
"source": "JMitnik/FacialDebiasing",
"score": 3
} |
#### File: FacialDebiasing/code/vae_model.py
```python
from typing import List, Union, Optional
import torch
import os
from logger import logger
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy.stats import norm
class Encoder(nn.Module):
"""
Encodes the data using a CNN
Input => 64x64 image
    Output => predicted value (classification logit)
              mean vector z_dim
              std vector z_dim (positive, via softplus)
"""
def __init__(self, z_dim: int = 20, custom_layers: Optional[nn.Sequential] = None):
super().__init__()
self.z_dim = z_dim
self.layers = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(64, 128, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128, 256, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(256),
nn.Conv2d(256, 512, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(512),
nn.Flatten(),
nn.Linear(512, 1000),
nn.LeakyReLU(),
nn.Linear(1000, z_dim*2+1)
)
def forward(self, input: torch.Tensor):
"""
Perform forward pass of encoder.
"""
out = self.layers(input)
        # return classification logit, mean, and std (softplus keeps std positive)
        return out[:, 0], out[:, 1:self.z_dim+1], F.softplus(out[:, self.z_dim+1:])
class UnFlatten(nn.Module):
def __init__(self, channel_size, image_size):
super(UnFlatten, self).__init__()
self.channel_size = channel_size
self.image_size = image_size
def forward(self, input):
return input.view(-1, self.channel_size, self.image_size, self.image_size)
class Decoder(nn.Module):
"""
    Decodes a latent sample back into an image using transposed convolutions
    Input => sample vector z_dim
    Output => 64x64 image
"""
def __init__(self, z_dim: int = 20, custom_layers: Optional[nn.Sequential] = None):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(z_dim, 1000),
nn.LeakyReLU(),
nn.Linear(1000, 512*1*1),
UnFlatten(512, 1),
nn.ConvTranspose2d(512, 256, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(256),
nn.ConvTranspose2d(256, 128, kernel_size=5, stride=2),
nn.LeakyReLU(),
nn.BatchNorm2d(128),
nn.ConvTranspose2d(128, 64, kernel_size=5, stride=2, output_padding=1),
nn.LeakyReLU(),
nn.BatchNorm2d(64),
nn.ConvTranspose2d(64, 3, kernel_size=5, stride=2, output_padding=1),
nn.Sigmoid()
)
def forward(self, input: torch.Tensor):
"""
        Perform forward pass of decoder.
"""
out = self.layers(input)
return out
class Db_vae(nn.Module):
def __init__(
self,
z_dim: int = 20,
hist_size: int = 1000,
alpha: float = 0.01,
num_bins: int = 10,
device: str = "cpu",
custom_encoding_layers: Optional[nn.Sequential] = None,
custom_decoding_layers: Optional[nn.Sequential] = None
):
super().__init__()
self.device = device
self.z_dim = z_dim
self.encoder = Encoder(z_dim, custom_encoding_layers)
self.decoder = Decoder(z_dim, custom_decoding_layers)
self.target_dist = torch.distributions.normal.Normal(0, 1)
self.c1 = 1
self.c2 = 1
self.c3 = 0.1
self.num_bins = num_bins
self.min_val = -15
self.max_val = 15
self.xlin = np.linspace(self.min_val, self.max_val, self.num_bins).reshape(1,1,self.num_bins)
self.hist = np.zeros((z_dim, self.num_bins))
self.means = torch.Tensor().to(self.device)
self.std = torch.Tensor().to(self.device)
self.alpha = alpha
@staticmethod
def init(path_to_model: str, device: str, z_dim: int):
full_path_to_model = f"results/{path_to_model}/model.pt"
if not os.path.exists(full_path_to_model):
logger.error(
f"Can't find model at {full_path_to_model}",
next_step="Evaluation will stop",
tip="Double check your path to model"
)
raise Exception
model: Db_vae = Db_vae(z_dim=z_dim, device=device)
try:
model.load_state_dict(torch.load(full_path_to_model, map_location=device))
except:
logger.error("Unable to load model from {full_path_to_model}.",
next_step="Model will not initialize",
tip="Did you use the right config parameters, or custom layers from the stored model?"
)
logger.info(f"Loaded model from {path_to_model}!")
return model
def forward(self, images: torch.Tensor, labels: torch.Tensor):
"""
        Given images and labels, run the classifier head and, for faces, the
        VAE branch; return the predictions and the per-sample total loss.
"""
pred, mean, std = self.encoder(images)
loss_class = F.binary_cross_entropy_with_logits(pred, labels.float(), reduction='none')
# We only want to calculate the loss towards actual faces
faceslicer = labels == 1
facemean = mean[faceslicer]
facestd = std[faceslicer]
# Get single samples from the distributions with reparametrisation trick
dist = torch.distributions.normal.Normal(facemean, facestd)
z = dist.rsample().to(self.device)
res = self.decoder(z)
# calculate VAE losses
loss_recon = (images[faceslicer] - res)**2
loss_recon = loss_recon.view(loss_recon.shape[0],-1).mean(1)
loss_kl = torch.distributions.kl.kl_divergence(dist, self.target_dist)
loss_kl = loss_kl.view(loss_kl.shape[0],-1).mean(1)
loss_vae = self.c2 * loss_recon + self.c3 * loss_kl
loss_total = self.c1 * loss_class
# Only add loss to positions of faces, rest is zero
zeros = torch.zeros(faceslicer.shape[0]).to(self.device)
zeros[faceslicer] = loss_vae
loss_total = loss_total + zeros
return pred, loss_total
def forward_eval(self, images: torch.Tensor):
"""
        Given images, run only the encoder and return the classification
        predictions (no gradient computation).
"""
with torch.no_grad():
pred, _,_ = self.encoder(images)
return pred
def interpolate(self, images: torch.Tensor, amount: int):
with torch.no_grad():
_, mean, std = self.encoder(images)
mean_1, std_1 = mean[0,:], std[0,:]
mean_2, std_2 = mean[1,:], std[1,:]
all_mean = torch.tensor([]).to(self.device)
all_std = torch.tensor([]).to(self.device)
diff_mean = mean_1 - mean_2
            diff_std = std_1 - std_2
steps_mean = diff_mean / (amount-1)
steps_std = diff_std / (amount-1)
for i in range(amount):
all_mean = torch.cat((all_mean, mean_1 - steps_mean*i))
all_std = torch.cat((all_std, std_1 - steps_std*i))
all_mean = all_mean.view(amount, -1)
all_std = all_std.view(amount, -1)
dist = torch.distributions.normal.Normal(all_mean, all_std)
z = dist.rsample().to(self.device)
recon_images = self.decoder(z)
return recon_images
def build_means(self, input: torch.Tensor):
_, mean, log_std = self.encoder(input)
self.means = torch.cat((self.means, mean))
return
def build_histo(self, input: torch.Tensor):
"""
        Accumulates per-dimension histograms of the latent distributions.
        NOTE: make sure you only pass faces into this function.
"""
_, mean, std = self.encoder(input)
self.means = torch.cat((self.means, mean))
self.std = torch.cat((self.std, std))
values = norm.pdf(self.xlin, mean.unsqueeze(-1).cpu(), std.unsqueeze(-1).cpu()).sum(0)
self.hist += values
return
def get_histo_max(self):
probs = torch.zeros_like(self.means[:,0]).to(self.device)
for i in range(self.z_dim):
dist = self.means[:,i].cpu().numpy()
hist, bins = np.histogram(dist, density=True, bins=self.num_bins)
bins[0] = -float('inf')
bins[-1] = float('inf')
bin_idx = np.digitize(dist, bins)
hist = hist + self.alpha
hist /= np.sum(hist)
p = 1.0/(hist[bin_idx-1])
p /= np.sum(p)
probs = torch.max(probs, torch.Tensor(p).to(self.device))
probs /= probs.sum()
return probs
def get_histo_max5(self):
probs = torch.zeros_like(self.means, dtype=float).to(self.device)
for i in range(self.z_dim):
dist = self.means[:,i].cpu().numpy()
hist, bins = np.histogram(dist, density=True, bins=self.num_bins)
bins[0] = -float('inf')
bins[-1] = float('inf')
bin_idx = np.digitize(dist, bins)
hist = hist + self.alpha
hist /= np.sum(hist)
p = 1.0/(hist[bin_idx-1])
p /= np.sum(p)
probs[:,i] = torch.Tensor(p).to(self.device)
probs = probs.sort(1, descending=True)[0][:,:5]
probs = probs.prod(1)
print(probs)
return probs
def get_histo_gaussian(self):
"""
        Returns resampling weights for each datapoint, computed from the accumulated latent histograms.
"""
results = np.empty(self.means.shape[0])
hist_batch_size = 4000
# Iterate in large batches over dataset to prevent memory lockup
for i in range(0, self.means.shape[0], hist_batch_size):
i_end = i + hist_batch_size
if i_end > self.means.shape[0]:
i_end = self.means.shape[0]
mean = self.means[i:i_end, :]
std = self.std[i:i_end, :]
lins = norm.pdf(self.xlin, mean.unsqueeze(-1).cpu(), std.unsqueeze(-1).cpu())
Q = lins * self.hist
Q = Q.sum(-1)
W = 1 / (Q + self.alpha)
# Performing the max value technique, TODO: analyse top 5
results[i:i_end] = W.max(-1)
        # Reset values
self.hist.fill(0)
self.means = torch.Tensor().to(self.device)
self.std = torch.Tensor().to(self.device)
return torch.tensor(results).to(self.device)
def recon_images(self, images: torch.Tensor):
with torch.no_grad():
pred, mean, std = self.encoder(images)
# Get single samples from the distributions with reparametrisation trick
dist = torch.distributions.normal.Normal(mean, std)
z = dist.rsample().to(self.device)
recon_images = self.decoder(z)
# return predictions and the loss
return recon_images
    def sample(self, n_samples, z_samples=None):
        """
        Sample n_samples images from the model by drawing latent vectors
        from the standard-normal prior and decoding them.
"""
with torch.no_grad():
z_samples = torch.randn(n_samples, self.z_dim).to(self.device)
sampled_images = self.decoder(z_samples)
return sampled_images
``` |
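A minimal shape-check sketch for the model above. It assumes 64x64 RGB inputs, as the Encoder docstring states, and that the file is importable as `vae_model`; the batch and labels are random placeholders.
```python
# Smoke test for Db_vae (illustrative; random batch, all-face labels).
import torch
from vae_model import Db_vae  # assumption: module importable under this name

model = Db_vae(z_dim=20, device="cpu")
images = torch.rand(8, 3, 64, 64)          # batch of 64x64 RGB images in [0, 1]
labels = torch.ones(8, dtype=torch.long)   # 1 = face, 0 = non-face

pred, loss = model(images, labels)   # classification logits + per-sample loss
print(pred.shape, loss.shape)        # torch.Size([8]) torch.Size([8])

recon = model.recon_images(images)   # encode -> reparametrise -> decode
print(recon.shape)                   # torch.Size([8, 3, 64, 64])
```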
{
"source": "jmittelstaedt/gdq-donations",
"score": 3
} |
#### File: src/data/scrape_runs_vods.py
```python
import sys
import gzip
import json
from pathlib import Path
import logging
from seleniumwire import webdriver
VOD_URL = "https://gdqvods.com/category/all/"
PROJECT_FOLDER = Path(__file__).resolve().parents[2]
LOG_FILE = PROJECT_FOLDER / "project.log"
logger = logging.getLogger(__name__)
def main():
logger.info("Downloading run and VOD info json using Selenium")
ffopt = webdriver.FirefoxOptions()
ffopt.headless = True
driver = webdriver.Firefox(options=ffopt)
driver.get(VOD_URL)
logger.info("VOD webpage requested successfully")
requests = [x for x in driver.requests if "categoryRuns" in x.url]
if len(requests) > 1:
logger.error("VOD requests not filtered properly! Filter update needed")
raise ValueError("Too many VOD requests found! update filter")
response_decoded = gzip.decompress(requests[0].response.body)
response_json = json.loads(response_decoded)
logger.info("Saving run and VOD info json")
with open(PROJECT_FOLDER / "data" / "external" / "run_data.json", "w") as f:
json.dump(response_json, f)
driver.close()
if __name__ == "__main__":
log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(
level=logging.INFO,
format=log_fmt,
handlers=[logging.FileHandler(LOG_FILE), logging.StreamHandler(sys.stdout)],
)
main()
``` |
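Once the script has run, the saved JSON can be inspected downstream. A small illustrative follow-up; no assumptions are made about the key names inside the payload:
```python
# Illustrative follow-up: load the JSON that main() saved and peek at its
# top-level structure. Only the path layout from the script above is reused.
import json
from pathlib import Path

project = Path(__file__).resolve().parents[2]  # mirrors PROJECT_FOLDER
with open(project / "data" / "external" / "run_data.json") as f:
    runs = json.load(f)
print(type(runs).__name__, len(runs) if hasattr(runs, "__len__") else "n/a")
```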
{
"source": "jmittelstaedt/xfit",
"score": 2
} |
#### File: xfit/tests/fitting_test.py
```python
from xfit.fitting import fit_dataArray, fit_dataArray_models, fit_dataset
import numpy as np
import xarray as xr
from xarray.testing import assert_equal, assert_allclose
bs = xr.DataArray(np.linspace(0,10,6), coords={'b_true': np.linspace(0,10,6)}, dims='b_true')
xs = xr.DataArray(np.linspace(0,10,11), coords={'x': np.linspace(0,10,11)}, dims='x')
data = xr.DataArray(
np.ones((1,10))*np.arange(5).reshape(5,1),
dims=['b_true','x'],
coords={
'b_true': np.arange(5),
'x': np.arange(10)
}
)
data_ds = xr.Dataset({'data': data})
expected_popt = xr.DataArray(np.arange(5).reshape(1,5),
coords={'param': ['b'], 'b_true': np.arange(5)},
dims=['param', 'b_true'])
expected_perr = xr.DataArray(np.zeros((1,5)),
coords={'param': ['b'], 'b_true': np.arange(5)},
dims=['param', 'b_true'])
expected_pcov = xr.DataArray(np.zeros((1,1,5)),
coords={'param_cov': ['b'], 'param': ['b'], 'b_true': np.arange(5)},
dims=['param_cov', 'param', 'b_true'])
expected_xda = data.coords['x']
expected_yda = data
expected_yerr_da = xr.full_like(expected_yda, np.nan, float)
def const(x, b):
return b
def const_guess(x, y, **kwargs):
return np.mean(y)
const_params = ['b']
def test_basic_fit_dataArray():
actual = fit_dataArray(data, const, const_guess, const_params, 'x')
expected = xr.Dataset(
{
'popt': expected_popt,
'perr': expected_perr,
'pcov': expected_pcov,
'xda': expected_xda,
'yda': expected_yda,
'yerr_da': expected_yerr_da
},
attrs={'fit_func': const, 'param_names': ['b'], 'xname': 'x', 'yname': None})
assert_equal(expected, actual)
def test_basic_fit_dataset():
actual = fit_dataset(data_ds, const, const_guess, const_params, 'x', 'data')
expected = xr.Dataset(
{
'popt': expected_popt,
'perr': expected_perr,
'pcov': expected_pcov,
'xda': expected_xda,
'yda': expected_yda,
'yerr_da': expected_yerr_da
},
attrs={'fit_func': const, 'param_names': ['b'], 'xname': 'x', 'yname': None})
assert_equal(expected, actual)
```
#### File: xfit/xfit/fitting.py
```python
from inspect import getfullargspec
from typing import (
TYPE_CHECKING,
Callable,
Mapping,
Optional,
Sequence,
Tuple,
Union,
Hashable
)
import numpy as np
import xarray as xr
from scipy.optimize import leastsq, curve_fit
from .utils import da_filter, gen_coord_combo, gen_sim_da
from .models import fitModel
if TYPE_CHECKING:
from xarray import DataArray, Dataset
def make_fit_dataArray_guesses(
yda: 'DataArray',
guess_func: Callable[[Sequence[float], Sequence[float]], Sequence[float]],
param_names: Sequence[str],
xname: str,
xda: 'DataArray',
guess_func_help_params: Mapping[str, float] = {}
) -> 'DataArray':
"""
creates a dataset of guesses of param_names given ``guess_func``. To be used
in :func:`~.fit_dataArray`.
Parameters
----------
yda : xarray.DataArray
data array containing data to fit to
guess_func : function
function to generate guesses of parameters. Arguments must be:
- numpy array of x data
- 1D numpy array of y data
- keyword arguments, with the keywords being all dims of ds besides
xname. Values passed will be individual floats of coordinate values
corresponding to those dims.
All arguments must be accepted, not all must be used.
As a hint, if designing for unexpected dims you can include **kwargs at
the end. This will accept any keword arguments not explicitly defined
by you and just do nothing with them.
Must return a list of guesses to the parameters, in the order given in
param_names
param_names : list of str
list of fit parameter names, in the order they are returned
by ``guess_func``
xname : str
the name of the ``dim`` of ``da`` to be fit along
xda : xarray.DataArray
DataArray containing the independent variable. The dims of this DataArray
must be a subset of yda's dims, must include ``xname`` as a dim, and
coordinates must match along dims that they share.
guess_func_help_params : dict
Dictionary of any "constant" parameters to help in generating fit
guesses. Passed as keyword arguments to ``guess_func``.
Returns
-------
xarray.Dataset
A Dataset with param_names as data_vars containing all guesses, and all
``dims`` of ``ds`` besides xname with the same coordinates, unless
otherwise specified in ``**selections``.
"""
xdims = xda.dims
combo_dims, coord_combos = gen_coord_combo(yda, [xname])
# Generate empty dataset to contain parameter guesses
guess_da = gen_sim_da(yda, [xname], {'param': param_names})
# apply guess_func for each coord combo and record param guesses
for combo in coord_combos:
selection_dict = dict(zip(combo_dims, combo))
xselection_dict = {k: v for k, v in selection_dict.items() if k in xdims}
# load x/y data for this coordinate combination
ydata = yda.sel(selection_dict).values
xdata = xda.sel(xselection_dict).values
# Deal with any possible spurious data
if np.all(np.isnan(ydata)):
# there is no meaningful data. Fill guesses with nan's
continue
else:
# remove bad datapoints
good_pts = np.logical_and(np.isfinite(ydata), np.isfinite(xdata))
xdata = xdata[good_pts]
ydata = ydata[good_pts]
# generate guesses
guesses = guess_func(xdata, ydata, **guess_func_help_params, **selection_dict)
# record fit parameters and their errors
guess_da.loc[selection_dict] = np.asarray(guesses)
return guess_da
def fit_dataArray(
yda: 'DataArray',
fit_func: Callable[[Sequence[float], float], Sequence[float]],
guess_func: Callable[[Sequence[float], Sequence[float]], Sequence[float]],
param_names: Sequence[str],
xname: str,
xda: Optional['DataArray'] = None,
yname: Optional[str] = None,
yerr_da: Optional['DataArray'] = None,
guess_func_help_params: Mapping[str, float] = {},
ignore_faliure: bool = False,
selections: Mapping[str, Union[Hashable, Sequence[Hashable]]] = {},
omissions: Mapping[str, Union[Hashable, Sequence[Hashable]]] = {},
ranges: Mapping[str, Tuple[float, float]] = {},
**kwargs) -> 'Dataset':
"""
    Fits values in a data array to a function. Returns an ``xarray.Dataset``
    of fit results.
Parameters
----------
yda : xarray.DataArray
        DataArray containing data to be fit.
fit_func : function
function to fit data to
guess_func : function
function to generate guesses of parameters. Arguments must be:
- numpy array of x data
- 1D numpy array of y data
- keyword arguments, with the keywords being all dims of ds besides
xname. Values passed will be individual floats of coordinate values
corresponding to those dims.
As a hint, if designing for unexpected dims you can include ``**kwargs``
        at the end. This will accept any keyword arguments not explicitly defined
        by you and just do nothing with them.
        All arguments must be accepted, not all must be used.
Must return a list of guesses to the parameters, in the order given in
``param_names``
param_names : list of str
list of fit parameter names, in the order they are returned
by guess_func
xname : str
the name of the ``dim`` of ``da`` to be fit along
xda : xarray.DataArray or None
        If given, dataArray containing data to use as the independent variable
in the fits. Must include ``xname`` among its dims and have a
subset of the coords of ``yda``
yname : str or None
Optional. The name of the y data being fit over.
yerr_da : xarray.DataArray or None
Optional. If provided, must be a data array containing errors in the
data contained in ``da``. Must have the same coordinates as ``da``
guess_func_help_params : dict
Dictionary of any "constant" parameters to help in generating fit
guesses. Passed as keyword arguments to ``guess_func``.
ignore_faliure : bool
if True, will ignore a ``RuntimeError`` from curve_fit that the
optimal parameters were not found, and will fill with nan and print
a message
selections : dict
Key is dimension name, values are coordinate values which we want to keep
omissions : dict
Key is dimension name, values are coordinate values which we want to omit
ranges : dict
Key is dimension name, values are a 2-iterable of a lower and upper limit
(inclusive) which we want to keep.
**kwargs
can be:
- names of ``dims`` of ``da``
          values should either be single coordinate values or lists of coordinate
          values of those ``dims``. Only data with coordinates given by selections
          are fit to. If no selections given, everything is fit to.
- kwargs of ``curve_fit``
Returns
-------
    xarray.Dataset
        Dataset containing the optimal parameters (``popt``), their errors
        (``perr``), covariance matrices (``pcov``), and the data that was fit.
"""
    # copy the filter dicts so we never mutate the caller's arguments or the
    # shared default dicts when selections are added from **kwargs below
    selections = {} if selections is None else dict(selections)
    omissions = {} if omissions is None else dict(omissions)
    ranges = {} if ranges is None else dict(ranges)
if xda is None:
xda = yda.coords[xname]
# Account for selections in keyword arguments
for kw in kwargs:
if kw in yda.dims:
selections[kw] = kwargs[kw]
xselections = {dim: sel for dim, sel in selections.items() if dim in xda.dims}
xomissions = {dim: sel for dim, sel in omissions.items() if dim in xda.dims}
xranges = {dim: sel for dim, sel in ranges.items() if dim in xda.dims}
xda = da_filter(xda, selections=xselections, omissions=xomissions, ranges=xranges)
yda = da_filter(yda, selections=selections, omissions=omissions, ranges=ranges)
if yerr_da is not None:
yerr_da = da_filter(yerr_da, selections=selections, omissions=omissions, ranges=ranges)
guesses = make_fit_dataArray_guesses(
yda,
guess_func,
param_names,
xname,
xda,
guess_func_help_params
)
# Determine which kwargs can be passed to curve_fit
cf_argspec = getfullargspec(curve_fit)
lsq_argspec = getfullargspec(leastsq)
good_args = cf_argspec.args + lsq_argspec.args
cf_kwargs = {k: v for k, v in kwargs.items() if k in good_args}
# Get the selection and empty fit dataset
param_template = gen_sim_da(yda, [xname], {'param': param_names})
cov_template = gen_sim_da(yda, [xname], {'param': param_names, 'param_cov': param_names})
fit_ds = xr.Dataset({'popt': param_template, 'perr': param_template.copy(), 'pcov': cov_template})
combo_dims, coord_combos = gen_coord_combo(yda, [xname])
# Do the fitting
for combo in coord_combos:
selection_dict = dict(zip(combo_dims, combo))
xselection_dict = {k: v for k,v in selection_dict.items() if k in xda.dims}
# load x/y data for this coordinate combination
ydata = yda.sel(selection_dict).values
xdata = xda.sel(xselection_dict).values
if yerr_da is not None:
yerr = yerr_da.sel(selection_dict).values
else:
yerr = None
# load fit parameter guesses for this coordinate combination
guess = guesses.sel(selection_dict).values
# Deal with any possible spurious data
if np.all(np.isnan(ydata)):
# there is no meaningful data. Fill fit results with nan's
continue
else:
# remove bad datapoints
good_pts = np.logical_and(np.isfinite(ydata), np.isfinite(xdata))
if yerr_da is not None:
good_pts = np.logical_and(good_pts, np.isfinite(yerr))
yerr = yerr[good_pts]
xdata = xdata[good_pts]
ydata = ydata[good_pts]
if ydata.size < len(param_names):
print("Less finite datapoints than parameters at : ", selection_dict)
continue
# fit
try:
            asig = yerr_da is not None  # absolute_sigma only when real errors are given
popt, pcov = curve_fit(fit_func, xdata, ydata, guess, yerr, absolute_sigma=asig, **cf_kwargs)
perr = np.sqrt(np.diag(pcov)) # from curve_fit documentation
except RuntimeError:
if ignore_faliure:
# leave filled with nan
continue
else:
raise
# record fit parameters and their errors
fit_ds['popt'].loc[selection_dict] = popt
fit_ds['perr'].loc[selection_dict] = perr
fit_ds['pcov'].loc[selection_dict] = pcov
fit_ds['xda'] = xda
fit_ds['yda'] = yda
fit_ds['yerr_da'] = yerr_da if yerr_da is not None else xr.full_like(yda, np.nan, dtype=float)
fit_ds.attrs['fit_func'] = fit_func
fit_ds.attrs['param_names'] = param_names
fit_ds.attrs['xname'] = xname
fit_ds.attrs['yname'] = yname
return fit_ds
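# Example usage of fit_dataArray (an illustrative sketch mirroring the
# package's tests, not part of the original module): fit a constant model
# along 'x' for every combination of the remaining coordinates.
#
#     def const(x, b):
#         return b
#
#     def const_guess(x, y, **kwargs):
#         return np.mean(y)
#
#     fit_ds = fit_dataArray(da, const, const_guess, ['b'], 'x')
#     fit_ds['popt'].sel(param='b')   # one fitted constant per remaining coord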
def fit_dataset(
ds: 'Dataset',
fit_func: Callable[[Sequence[float], float], Sequence[float]],
guess_func: Callable[[Sequence[float], Sequence[float]], Sequence[float]],
param_names: Sequence[str],
xname: str,
yname: str,
xda_name: Optional[str] = None,
yerr_name: Optional[str] = None,
guess_func_help_params: Mapping[str, float] = {},
ignore_faliure: bool = False,
selections: Mapping[str, Union[Hashable, Sequence[Hashable]]] = {},
omissions: Mapping[str, Union[Hashable, Sequence[Hashable]]] = {},
ranges: Mapping[str, Tuple[float, float]] = {},
**kwargs
) -> 'Dataset':
"""
    Fits values in a dataset to a function. Returns an ``xarray.Dataset``
    of fit results.
Convenience function which calls :func:`~.fit_dataArray`.
Parameters
----------
ds : xarray.Dataset
Dataset containing data to be fit.
fit_func : function
function to fit data to
guess_func : function
function to generate guesses of parameters. Arguments must be:
- numpy array of x data
- 1D numpy array of y data
- keyword arguments, with the keywords being all dims of ds besides
xname. Values passed will be individual floats of coordinate values
corresponding to those dims.
As a hint, if designing for unexpected dims you can include **kwargs at
        the end. This will accept any keyword arguments not explicitly defined
by you and just do nothing with them.
        All arguments must be accepted, but not all must be used.
        Must return a list of guesses for the parameters, in the order given in
``param_names``
param_names : list of str
list of fit parameter names, in the order they are returned
by guess_func
xname : str
the name of the ``dim`` of ``ds`` to be fit along
yname : str
        the name of the ``data_var`` of ``ds`` containing the data to be fit
xda_name : str
        Optional. Name of the ``data_var`` of ``ds`` which contains the x data
yerr_name : str
Optional. the name of the ``data_var`` of ``ds`` containing errors in data
to be fit.
guess_func_help_params : dict
Dictionary of any "constant" parameters to help in generating fit
guesses. Passed as keyword arguments to ``guess_func``.
ignore_faliure : bool
if True, will ignore a ``RuntimeError`` from curve_fit that the
optimal parameters were not found, and will fill with nan and print
a message
selections : dict
Key is dimension name, values are coordinate values which we want to keep
omissions : dict
Key is dimension name, values are coordinate values which we want to omit
ranges : dict
Key is dimension name, values are a 2-iterable of a lower and upper limit
(inclusive) which we want to keep.
**kwargs
can be:
        - names of ``dims`` of ``ds``
          values should either be single coordinate values or lists of coordinate
          values of those ``dims``. Only data with coordinates given by the
          selections are fit; if no selections are given, everything is fit.
- kwargs of ``curve_fit``
Returns
-------
    xarray.Dataset
        Dataset of fit results
"""
yerr_da = None if yerr_name is None else ds[yerr_name]
xda = None if xda_name is None else ds[xda_name]
fit_da = ds[yname]
return fit_dataArray(fit_da, fit_func, guess_func, param_names, xname, xda,
yname, yerr_da, guess_func_help_params, ignore_faliure,
selections, omissions, ranges, **kwargs)
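# A minimal usage sketch for fit_dataset (illustrative only: the dataset,
# variable names, and guess function below are hypothetical, not part of
# this module):
#
#     def line(x, m, b):
#         return m * x + b
#
#     def guess_line(x, y, **kwargs):
#         m = (y[-1] - y[0]) / (x[-1] - x[0])
#         return [m, y[0] - m * x[0]]
#
#     fit_ds = fit_dataset(ds, line, guess_line, ['m', 'b'],
#                          xname='time', yname='signal')
#     # fit_ds['popt'] then holds the fitted parameters for each
#     # coordinate combination along the non-fit dims.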
def fit_dataArray_models(
da: 'DataArray',
models: Union[fitModel, Sequence[fitModel]],
xname: str, # TODO: Explicitly add other arguments
**kwargs):
"""
    Fits a DataArray to a collection of models, which are combined additively.
Parameters
----------
da : xr.DataArray
DataArray to fit over
models : fitModel or list of fitModel
Models to use in fit. Will be combined additively
xname : str
dimension to fit over
"""
if isinstance(models, fitModel):
models = [models]
# make list of all unique parameters, with intersection of bounds if same
# parameter in multiple models
all_params = []
for m in models:
for param in m.params:
for p in all_params:
if p == param:
p.intersect(param)
break
else: # if not found in all_params
all_params.append(param)
def full_func(x, *args):
cumsum = 0
for m in models:
mparams = [args[all_params.index(p)] for p in m.params]
cumsum += m(x, *mparams)
return cumsum
    # take the mean of the estimates from each model (TODO: should we use the median instead?)
def full_guess(x, y, **kwargs):
guesses = []
# generate parameter guesses from each model
for m in models:
model_guesses = np.atleast_1d(m.guess(x, y, **kwargs))
guesses.append(dict(zip([p.name for p in m.params], model_guesses)))
# take the mean of all guesses for each parameter
final_guesses = []
for param in all_params:
count = 0
cumsum = 0
for guess in guesses:
if param.name in guess:
count += 1
cumsum += guess[param.name]
final_guesses.append(cumsum/count)
return final_guesses
bounds = [[], []]
for p in all_params:
bounds[0].append(p.bounds[0])
bounds[1].append(p.bounds[1])
fit_ds = fit_dataArray(da, full_func, full_guess,
[p.name for p in all_params], xname,
bounds=tuple(bounds), **kwargs)
fit_ds.attrs['models'] = {m.name: m for m in models}
return fit_ds
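# Illustrative sketch of fitting a sum of two models (gauss_func and
# gauss_guess are hypothetical helpers, not defined in this module):
#
#     peak = fitModel('gauss', gauss_func, ['amp', 'mu', 'sigma'], guess=gauss_guess)
#     offset = fitModel('offset', lambda x, c: c + 0 * x, ['c'])
#     fit_ds = fit_dataArray_models(da, [peak, offset], xname='freq')
#
# Parameters shared by name across models are merged, their bounds are
# intersected via fitParameter.intersect, and their guesses are averaged.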
def fit_dataset_models(ds, models, xname, yname, yerr_name=None, **kwargs):
"""
Fits a dataset to a collection of models, combined additively
Parameters
----------
ds : xr.Dataset
Dataset containing data to fit
models : list of fitModel
Models to fit to
xname : str
Name of coordinate to use as the independent variable
yname : str
name of dataArray to use as the dependent variable
yerr_name : str
name of dataArray to use as the errors in the dependent variable
"""
if yerr_name is not None:
if yerr_name not in ds.data_vars:
raise AttributeError('%s is not a data_var!'%yerr_name)
else:
yerr_da = ds[yerr_name]
else:
yerr_da=None
fit_da = ds[yname]
return fit_dataArray_models(fit_da, models, xname,
yname=yname, yerr_da=yerr_da, **kwargs)
```
#### File: xfit/xfit/models.py
```python
from typing import (
Callable,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
class fitParameter:
"""
parameter used in fitting
Attributes
----------
name : str
name of the parameter
bounds : 2-tuple of float
a tuple of lower and upper bounds on the parameter
"""
def __init__(
self,
name: str,
bounds: Tuple[float, float] = (-np.inf, np.inf)
):
self.name = name
if bounds[0] > bounds[1]:
raise ValueError("Lower bound must be less than upper bound!")
self.bounds = bounds
def __eq__(self, other: 'fitParameter'):
""" equality determined by name only. maybe bad... idk """
return self.name == other.name
def __repr__(self):
return f"<fitParameter {self.name}, {self.bounds}>"
# TODO: what to do if bounds don't intersect?
def intersect(self, other: 'fitParameter') -> 'fitParameter':
"""
Returns a new fitParameter with bounds which are the intersection
of the initial ones. Names must be the same, and will be the name of the
result.
"""
        if self.name != other.name:
            raise ValueError("Cannot intersect parameters with different names")
        return fitParameter(
            self.name,
            (
                max(self.bounds[0], other.bounds[0]),
                min(self.bounds[1], other.bounds[1])
            )
        )
class fitModel:
"""
Model to fit to
Attributes
----------
name : str
name of the model
func : callable
function which represents the model. First argument must be the
dependent variable, and the rest the model parameters
guess : callable
function which generates parameter guesses. If not given, guesses
are all 1. Must take an array for x and y values, and allow for keyword
arguments which will be dimension names where coordinate values for that
dimension will be passed.
params : list of str or fitParameter
Parameters of the model, in the order accepted by the model function
bounds : tuple
bounds for parameters, in the order accepted by model function. As would
be passed to ``scipy.optimize.curve_fit``. Only used if fit parameters
do not already have bounds.
"""
def __init__(
self,
name: str,
func: Callable[[Sequence[float], float], Sequence[float]],
params: Sequence[Union[fitParameter, str]],
guess: Optional[Callable[[Sequence[float], Sequence[float]], Sequence[float]]] = None,
bounds: Union[Tuple[float, float], Sequence[Tuple[float, float]]] = (-np.inf, np.inf)
):
self.name = name
self.func = func
# create parameter list
if isinstance(params[0], fitParameter):
self.params = params
elif isinstance(params[0], str):
self.params = []
# make proper bounds
for i, param in enumerate(params):
try:
lobound = bounds[0][i]
except TypeError:
lobound = bounds[0]
try:
upbound = bounds[1][i]
except TypeError:
upbound = bounds[1]
self.params.append(fitParameter(param, (lobound, upbound)))
else:
raise ValueError("Must have good values for the parameters")
# bounds tuple
self.bounds = ( tuple(x.bounds[0] for x in self.params),
tuple(x.bounds[1] for x in self.params) )
if guess is not None:
self.guess = guess
else:
def gf(*args, **kwargs):
return (1,)*len(self.params)
self.guess = gf
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __repr__(self):
rstr = f"<fitModel {self.name}>\nParameters:"
for p in self.params:
rstr += '\n '+str(p)
return rstr+'\n'
def rename_params(self, rename_dict: Mapping[str, str]) -> 'fitModel':
"""
Returns a new fitModel with different parameter names
"""
new_params = []
for p in self.params:
if p.name in rename_dict:
new_params.append(fitParameter(rename_dict[p.name], p.bounds))
else:
new_params.append(fitParameter(p.name, p.bounds))
return fitModel(self.name, self.func, new_params, self.guess)
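# Example construction (a sketch only; the exponential-decay model and its
# guess function are hypothetical):
#
#     def decay(x, amp, tau):
#         return amp * np.exp(-x / tau)
#
#     def decay_guess(x, y, **kwargs):
#         return [y[0], (x[-1] - x[0]) / 2]
#
#     model = fitModel('decay', decay,
#                      [fitParameter('amp', (0, np.inf)),
#                       fitParameter('tau', (0, np.inf))],
#                      guess=decay_guess)
#     fast = model.rename_params({'tau': 'tau_fast'})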
``` |
{
"source": "jmitterh/web-scraping-challenge",
"score": 3
} |
#### File: jmitterh/web-scraping-challenge/scrape_mars.py
```python
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import requests
import time
# Connecting to chromedriver
def init_browser():
# @NOTE: Replace the path with your actual path to the chromedriver
executable_path = {'executable_path': 'chromedriver.exe'}
return Browser("chrome", **executable_path, headless=False)
# Master Scrape function
def scrape():
'''
    Scrapes 5 URLs relating to Mars information:
* 1. Latest Mars News
* 2. Featured Mars Image
* 3. Current Weather on Mars
* 4. Mars Facts
* 5. Mars High-Definition Hemispheres
'''
# dict to store all data
mars_data = {}
# calling chromedriver function
browser = init_browser()
#1. ######## LATEST MARS NEWS ########
marsNews_url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
browser.visit(marsNews_url)
time.sleep(1)
# Scrape page into Soup
html = browser.html
soup = BeautifulSoup(html, "html.parser")
slide = soup.find_all('li', class_='slide')[0]
title = slide.find('h3').text
news_p = slide.find('div', class_='article_teaser_body').text
# Store data in a dictionary
mars_data["title"] = title
mars_data["news_p"] = news_p
#2. ######## FEATURED MARS IMAGE ########
featuredImg_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(featuredImg_url)
time.sleep(1)
# Scrape page into Soup
html = browser.html
soup = BeautifulSoup(html, "html.parser")
fancybox = soup.find_all('a', class_='fancybox')[0]
img = fancybox.get('data-fancybox-href')
featured_image_url = f'https://www.jpl.nasa.gov/{img}'
# Store data in a dictionary
mars_data["featured_image_url"] = featured_image_url
#3. ######## CURRENT WEATHER ON MARS ########
marsWeather_url = "https://twitter.com/marswxreport?lang=en"
browser.visit(marsWeather_url)
time.sleep(1)
# Scrape page into Soup
html = browser.html
soup = BeautifulSoup(html, "html.parser")
mars_weather = soup.find('p', class_='TweetTextSize').text
# Store data in a dictionary
mars_data["mars_weather"] = mars_weather
#4. ######## MARS FACTS ########
marsFacts_url = "https://space-facts.com/mars/"
# Use pandas to scrape tables within the url
tables = pd.read_html(marsFacts_url)
# Grab the first DF of Mars Facts
df = tables[0]
# Give column names
df.columns = ['Description', 'Value']
# set index to the description column
df.set_index('Description', inplace=True)
# Convert DF to HTLM
html_table = df.to_html()
# Remove any new line and extra white spaces
html_table = html_table.replace('\n', ' ')
# Store data in a dictionary
mars_data["mars_facts"] = html_table
#5. ######## MARS HIGH-DEFINITION HEMISPHERES ########
marsHemi_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(marsHemi_url)
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
descriptions = soup.find_all('div', class_='description')
# Defining base url
    base_url = 'https://astrogeology.usgs.gov'
# empty list for urls to get hd pic
url_list = []
# For loop to get url's with hd image
for desc in descriptions:
href = desc.find('a')['href']
url_list.append(base_url + href)
# list for dictionaries
hemisphere_image_urls = []
# For loop to iterate over list of url's to get url image and title
for i in url_list:
browser.visit(i)
time.sleep(1)
# Scrape page into Soup
html = browser.html
soup = BeautifulSoup(html, "html.parser")
title = soup.find('h2', class_='title').text
wideimage = soup.find('img', class_='wide-image').get('src')
img_url = base_url + wideimage
        hemi_dict = {'title': title, 'img_url': img_url}
        hemisphere_image_urls.append(hemi_dict)
# Store data in a dictionary
mars_data['hemisphere_image_urls'] = hemisphere_image_urls
# close the browser
browser.quit()
return mars_data
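# A minimal usage sketch (assumes chromedriver.exe is reachable as configured
# in init_browser above):
#
#     if __name__ == "__main__":
#         data = scrape()
#         print(data["title"])
#         print(data["featured_image_url"])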
``` |
{
"source": "jmitz/daymetDataExtraction",
"score": 3
} |
#### File: jmitz/daymetDataExtraction/daymetFileDownload.py
```python
import urllib.request
import os
from datetime import date as dt
types = ['dayl', 'prcp', 'srad', 'swe', 'tmax', 'tmin', 'vp']
# http://thredds.daac.ornl.gov/thredds/catalog/ornldaac/1345/catalog.html?dataset=1345/daymet_v3_prcp_monttl_1980_na.nc4
def downloadError(inUrl, inFileName, e):
print("Error downloading {}".format(inFileName))
print("Errorcode is {}".format(e.getcode()))
os.remove(inFileName)
download(inUrl, inFileName)
def download(inUrl, inFileName):
try:
print("Downloading {}".format(inFileName))
urllib.request.urlretrieve(inUrl, inFileName)
print("{} download complete".format(inFileName))
except urllib.error.HTTPError as e:
if e.getcode() == 404:
print("{} not found".format(e.geturl()))
else:
downloadError(inUrl, inFileName, e)
    except Exception as e:
downloadError(inUrl, inFileName, e)
def getDailyFiles(inYear, inTypes):
urlTemplate = 'http://thredds.daac.ornl.gov/thredds/fileServer/ornldaac/1328/{0}/daymet_v3_{1}_{0}_na.nc4'
# fileTemplate = 'E:/daymet/Daily/{1}/daymet_v3_{1}_{0}_na.nc4'
fileTemplate = '../daymet/Daily/{1}/daymet_v3_{1}_{0}_na.nc4'
for type in inTypes:
if not os.path.isfile(fileTemplate.format(inYear, type)):
download(urlTemplate.format(inYear, type), fileTemplate.format(inYear, type))
else:
print("Skipping {} - {}".format(inYear, type))
def getMonthFiles(inYear, inTypes):
urlTemplate = 'http://thredds.daac.ornl.gov/thredds/fileServer/ornldaac/1345/daymet_v3_{1}_mon{2}_{0}_na.nc4'
fileTemplate = '../daymet/Monthly/{1}/daymet_v3_{1}_mon{2}_{0}_na.nc4'
for type in inTypes:
sumName = 'ttl' if (type == 'prcp') else 'avg'
if not os.path.isfile(fileTemplate.format(inYear, type, sumName)):
download(urlTemplate.format(inYear, type, sumName), fileTemplate.format(inYear, type, sumName))
else:
print("Skipping {} - {} - mon{}".format(inYear, type, sumName))
def getAnnualFiles(inYear, inTypes):
urlTemplate = 'http://thredds.daac.ornl.gov/thredds/fileServer/ornldaac/1343/daymet_v3_{1}_ann{2}_{0}_na.nc4'
fileTemplate = '../daymet/Annual/{1}/daymet_v3_{1}_ann{2}_{0}_na.nc4'
for type in inTypes:
sumName = 'ttl' if (type == 'prcp') else 'avg'
if not os.path.isfile(fileTemplate.format(inYear, type, sumName)):
download(urlTemplate.format(inYear, type, sumName), fileTemplate.format(inYear, type, sumName))
else:
print("Skipping {} - {} - anl{}".format(inYear, type, sumName))
def downloadDaymet(inStartYear=1980, inEndYear=dt.today().year):
for year in range(inStartYear, inEndYear):
getDailyFiles(year, ['dayl', 'prcp', 'srad', 'swe', 'tmax', 'tmin', 'vp'])
getMonthFiles(year, ['prcp', 'tmax', 'tmin', 'vp'])
getAnnualFiles(year, ['prcp', 'tmax', 'tmin', 'vp'])
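# Usage sketch: downloads everything from 1980 up to (but not including) the
# current year; assumes the ../daymet/{Daily,Monthly,Annual}/<type> directories
# already exist, since urlretrieve will not create them:
#
#     if __name__ == "__main__":
#         downloadDaymet()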
``` |
{
"source": "jmizzoni/magictheslackening",
"score": 2
} |
#### File: magictheslackening/src/app.py
```python
from flask import Flask, request, jsonify
from .cardfetcher_utils import *
app = Flask('magictheslackening')
GATHERER_IMG_TPL = 'http://gatherer.wizards.com/Handlers/Image.ashx?multiverseid={}&type=card'
CARD_NOT_FOUND_ERR_TPL = 'Whoops, looks like {} isn\'t a magic card'
MTGSTOCKS_LINK_TPL = '<{}|MTGStocks.com> price for {}'
@app.route('/cardimage', methods=['POST'])
def fetch_card_image():
searchname = request.form['text']
card_obj = get_card_obj(searchname)
if not card_obj:
return jsonify({
'response_type': 'in_channel',
'text': CARD_NOT_FOUND_ERR_TPL.format(searchname),
})
img_url = GATHERER_IMG_TPL.format(card_obj['id'])
attachments = [{
'fallback': card_obj['name'],
'image_url': img_url
}]
resp = {
'response_type': 'in_channel',
'text': '',
'attachments': attachments,
}
return jsonify(resp)
@app.route('/oracletext', methods=['POST'])
def fetch_oracle_text():
searchname = request.form['text']
print(searchname)
card_obj = get_card_obj(searchname)
if not card_obj:
return jsonify({
'response_type': 'in_channel',
'text': CARD_NOT_FOUND_ERR_TPL.format(searchname),
})
print(card_obj)
card_attachment = {
'fallback': card_obj['name'],
'title': card_obj['name'],
'fields': [
{
'title': 'Mana Cost',
'value': format_mana(card_obj['manaCost']),
'short': True
},
{
'title': 'Types',
'value': '{} - {}'.format(card_obj['type'], card_obj['subType']),
'short': True
},
{
'title': 'Text',
'value': format_mana(card_obj['description']),
'short': False
}
]
}
if 'Creature' in card_obj['type']:
card_attachment['fields'].append({
'title': 'P/T',
'value': '{}/{}'.format(card_obj['power'], card_obj['toughness']),
'short': True
})
if 'Planeswalker' in card_obj['type']:
card_attachment['fields'].append({
'title': 'Loyalty',
'value': card_obj['loyalty'],
'short': True
})
resp = {
'response_type': 'in_channel',
'text': '',
'attachments': [card_attachment],
}
return jsonify(resp)
@app.route('/cardprice', methods=['POST'])
def fetch_card_price():
searchname = request.form['text']
args = searchname.split(':')[:2]
card_obj = get_card_price(*args)
if not card_obj:
return jsonify({
'response_type': 'in_channel',
'text': CARD_NOT_FOUND_ERR_TPL.format(searchname),
})
prices = card_obj['prices']
card_attachment = {
'fallback': card_obj['name'],
'title': card_obj['name'],
'fields': [
{
'title': 'Set',
'value': card_obj['set'],
'short': True
},
{
'title': 'Average',
'value': prices['avg'],
'short': True
}
]
}
if not card_obj['promo']:
card_attachment['fields'].extend(
[
{
'title': 'Low',
'value': prices['low'],
'short': True
},
{
'title': 'High',
'value': prices['high'],
'short': True
}
]
)
resp = {
'response_type': 'in_channel',
'text': '',
'attachments': [card_attachment],
}
return jsonify(resp)
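# Local run sketch (assumes the standard Flask CLI; the module path is an
# assumption based on this file's location):
#
#     FLASK_APP=src.app flask run
#
# Each route expects a Slack slash-command POST with the card name in the
# 'text' form field and replies with an in-channel message.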
``` |
{
"source": "jmj23/Kaggle-Pneumothorax",
"score": 3
} |
#### File: jmj23/Kaggle-Pneumothorax/Models.py
```python
import numpy as np
from keras.applications.inception_v3 import InceptionV3
from keras.initializers import RandomNormal
from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D,
Cropping2D, Dense, Flatten, GlobalAveragePooling2D,
Input, Lambda, MaxPooling2D, Reshape, UpSampling2D,
ZeroPadding2D, ZeroPadding3D, add, concatenate)
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.models import Model
# Parameterized 2D Block Model
def BlockModel2D(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block CED model for segmentation problems
Args:
input shape: a list or tuple of [rows,cols,channels] of input images
filt_num: the number of filters in the first and last layers
        This number is linearly increased and decreased throughout the model
    numBlocks: number of processing blocks. The larger the number the deeper the model
    Returns:
        An uninitialized Keras model
    Example usage: SegModel = BlockModel2D([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_clean_{}'.format(rr))(x)
skip_list.append(x)
# expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
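# A minimal compile/train sketch (optimizer, loss, and data are placeholders,
# not taken from this repo):
#
#     model = BlockModel2D([256, 256, 1], filt_num=8, numBlocks=3)
#     model.compile(optimizer='adam', loss='binary_crossentropy')
#     # model.fit(x_train, y_train, batch_size=8, epochs=10)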
# Parameterized 2D Block Model
def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block model for pretraining on classification task
Args:
input shape: a list or tuple of [rows,cols,channels] of input images
filt_num: the number of filters in the first and last layers
        This number is linearly increased and decreased throughout the model
    numBlocks: number of processing blocks. The larger the number the deeper the model
    Returns:
        An uninitialized Keras model
    Example usage: ClassModel = BlockModel_Classifier([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_skip_{}'.format(rr))(x)
# average pooling
x = GlobalAveragePooling2D()(x)
# classifier
lay_out = Dense(1, activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
def ConvertEncoderToCED(model):
    # Returns a model with frozen encoder layers
    # and complementary, unfrozen decoder layers.
    # The model must be compiled again after using this function.
    # get input layer
lay_input = model.input
# get skip connection layer outputs
skip_list = [l.output for l in model.layers if 'skip' in l.name]
numBlocks = len(skip_list)
filt_num = int(skip_list[0].shape[-1])
x = model.layers[-3].output
# freeze encoder layers
for layer in model.layers:
layer.trainable = False
use_bn = True
# make expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
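# Intended pretrain-then-segment workflow sketch (training calls are
# placeholders):
#
#     clf = BlockModel_Classifier([256, 256, 1], filt_num=8)
#     clf.compile(optimizer='adam', loss='binary_crossentropy')
#     # ... pretrain clf on the classification task ...
#     seg = ConvertEncoderToCED(clf)  # encoder frozen, decoder trainable
#     seg.compile(optimizer='adam', loss='binary_crossentropy')
#     # ... train seg on the segmentation task ...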
def Inception_model(input_shape=(299, 299, 3)):
incep_model = InceptionV3(
include_top=False, weights=None, input_shape=input_shape, pooling='avg')
input_layer = incep_model.input
incep_output = incep_model.output
# x = Conv2D(16, (3, 3), activation='relu')(incep_output)
# x = Flatten()(x)
x = Dense(1, activation='sigmoid')(incep_output)
return Model(inputs=input_layer, outputs=x)
``` |
{
"source": "jmjackowiak/advent_of_code_2019",
"score": 3
} |
#### File: advent_of_code_2019/day10/main.py
```python
from collections import defaultdict
def get_asteroids(lst):
asteroids = []
for y in range(len(lst)):
for x in range(len(lst[y])):
if lst[y][x] == "#":
asteroids.append((y, x))
return asteroids
def get_visible_asteroids(asteroids):
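    # Two asteroids block each other iff they lie at the same slope (dy/dx) on
    # the same side of the viewing asteroid; splitting into "left" and "right"
    # half-planes lets a plain slope stand in for the full angle, so the count
    # of visible asteroids is the number of distinct slopes per side.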
visible = {}
for y, x in asteroids:
left = set()
right = set()
for y1, x1 in asteroids:
if x == x1 and y == y1:
continue
if (x1 < x) or (x1 == x and y1 < y):
side = left
else:
side = right
if x1 == x:
angle = float('inf')
else:
angle = (y1 - y) / (x1 - x)
side.add(angle)
visible[(y, x)] = len(left) + len(right)
return visible
def part1(lst):
asteroids = get_asteroids(lst)
visible = get_visible_asteroids(asteroids)
return max(visible.values())
def part2(lst):
asteroids = get_asteroids(lst)
visible = get_visible_asteroids(asteroids)
for i in visible.keys():
if visible[i] == max(visible.values()):
y, x = i
break
asteroids_centered = [(y1 - y, x1 - x) for y1, x1 in asteroids]
dist_l = defaultdict(list)
dist_r = defaultdict(list)
for y1, x1 in asteroids_centered:
if x1 == 0 and y1 == 0:
continue
if x1 < 0:
dist = dist_l
else:
dist = dist_r
if x1 == 0:
angle = y1 * float('inf')
else:
angle = y1 / x1
dist[angle].append((y1, x1))
for i in dist_r.keys():
dist_r[i] = sorted(dist_r[i], key=lambda tupl: tupl[0] ** 2 + tupl[1] ** 2)
for i in dist_l.keys():
dist_l[i] = sorted(dist_l[i], key=lambda tupl: tupl[0] ** 2 + tupl[1] ** 2)
    vaporized = []
    while any(dist_l.values()) or any(dist_r.values()):
        for i in sorted(dist_r.keys()):
            if dist_r[i]:
                vaporized.append(dist_r[i].pop(0))
        for i in sorted(dist_l.keys()):
            if dist_l[i]:
                vaporized.append(dist_l[i].pop(0))
    return (vaporized[199][1] + x) * 100 + vaporized[199][0] + y
with open("input.txt", 'r') as file:
lst = [[j for j in i.replace("\n", "")] for i in file.readlines()]
print(part1(lst))
print(part2(lst))
``` |
{
"source": "jmjackowiak/advent_of_code_2020",
"score": 3
} |
#### File: advent_of_code_2020/day_07/main.py
```python
import queue
bags = {}
with open("input.txt", "r") as file:
for line in file.readlines():
line = line.replace(".","").replace("bags", "").replace("bag","")
bag, content = line.split(" contain ")
bags[bag] = {}
for i in content.split(","):
b = " ".join(i.split()[1:])
if b == "other":
continue
q = int(i.split()[0])
bags[bag][b] = q
def valid_outermost(valid_bag, bags):
    containing_bags = set()
    len_before = 0
    for bag, contents in bags.items():
        if valid_bag in contents:
            containing_bags.add(bag)
    while len_before != len(containing_bags):
        len_before = len(containing_bags)
        for bag, content in bags.items():
            if bag in containing_bags:
                continue
            for i in containing_bags.copy():
                if i in content:
                    containing_bags.add(bag)
    return containing_bags
def bag_capacity(bag_name, bags):
capacity = 0
q = queue.Queue()
q.put((bag_name,1))
while not q.empty():
top_bag, mult = q.get()
for bag, cap in bags[top_bag].items():
capacity += cap*mult
for b,c in bags[bag].items():
capacity += mult*cap*c
q.put((b,mult*cap*c))
return capacity
print(f"Part 1: {len(valid_outmost('shiny gold', bags))}")
print(f"Part 2: {bag_capacity('shiny gold',bags)}")
```
#### File: advent_of_code_2020/day_08/main.py
```python
program = []
with open("input.txt", "r") as file:
for line in file.readlines():
program.append(line.split())
def execute(op, arg, ic, acc):
# return ic change, acc change
if op == "nop":
return ic+1, acc
if op == "jmp":
return ic+int(arg), acc
if op == "acc":
return ic+1, acc+int(arg)
raise ValueError(f"No operation: {op}")
def find_loop(program):
ic = 0
acc = 0
executed = set()
while ic < len(program):
executed.add(ic)
op, arg = program[ic]
new_ic, new_acc = execute(op, arg, ic, acc)
if new_ic in executed:
#Found loop
return (ic, acc)
ic, acc = new_ic, new_acc
return (ic, acc)
def part2(program):
for i in range(len(program)):
original = program[i][0]
if program[i][0] == "jmp":
program[i][0] = "nop"
elif program[i][0] == "nop":
program[i][0] == "jmp"
else:
continue
ic, acc = find_loop(program)
if ic>=len(program):
return acc
else:
program[i][0] = original
print(f"Part 1: {find_loop(program)[1]}")
print(f"Part 2: {part2(program)}")
```
#### File: advent_of_code_2020/day_15/main.py
```python
with open("input.txt", "r") as f:
numbers = [int(i) for i in f.readline().split(",")]
def part1(numbers, nth):
mem = {}
turn = 1
num = 0
for i in numbers:
if i in mem:
num = turn - mem[i]
else:
num = 0
mem[i] = turn
turn += 1
while turn != nth:
if num in mem:
next_num = turn - mem[num]
else:
next_num = 0
mem[num] = turn
num = next_num
turn += 1
return num
print(f"Part 1: {part1(numbers, 2020)}")
print(f"Part 2: {part1(numbers, 30000000)}")
```
#### File: advent_of_code_2020/day_16/main.py
```python
def part1(req, tickets):
all_req = set()
for i in req.values():
all_req |= i
invalid = set()
total = 0
for i in range(len(tickets)):
ticket = tickets[i]
for j in ticket:
if j not in all_req:
total += j
invalid.add(i)
break
return total, invalid
def part2(req, tickets, my_ticket, invalid):
tickets = [tickets[i] for i in range(len(tickets)) if i not in invalid]
tickets.append(my_ticket)
field_index_map = {}
for i in req:
field_index_map[i] = set()
for index in range(len(req)):
for ticket in tickets:
if ticket[index] not in req[i]:
break
else:
field_index_map[i].add(index)
total = 1
assigned = set()
for k, _ in sorted(field_index_map.items(), key=lambda x: len(x[1])):
field_index_map[k] -= assigned
field_index_map[k] = field_index_map[k].pop()
assigned.add(field_index_map[k])
for i in field_index_map:
if "departure" in i:
total *= my_ticket[field_index_map[i]]
return total
req = {}
my_ticket = []
tickets = []
with open('input.txt', 'r') as f:
data = req
for line in f.readlines():
line = line.strip("\n")
if line == "":
continue
if line == "your ticket:":
data = my_ticket
continue
elif line == "nearby tickets:":
data = tickets
continue
if data is req:
name, values = line.split(":")
left, right = values.split(" or ")
l_left, h_left = left.split("-")
l_right, h_right = right.split("-")
req[name] = set()
for j in range(int(l_left), int(h_left)+1):
req[name].add(j)
for j in range(int(l_right), int(h_right)+1):
req[name].add(j)
else:
data.append([int(j) for j in line.split(",")])
my_ticket = my_ticket[0]
total_invalid, invalid = part1(req, tickets)
print(f"Part 1: {total_invalid}")
print(f"Part 2: {part2(req, tickets, my_ticket, invalid)}")
```
#### File: advent_of_code_2020/day_19/main.py
```python
with open("input.txt", "r") as f:
rules_data, messages_data = f.read().split("\n\n")
rules = {}
for rule in rules_data.split("\n"):
n, r = rule.split(": ")
n = int(n)
rules[n] = []
for i in r.split(" | "):
if '"' in i:
rules[n].append(i.replace('"', "").split()[0])
else:
rules[n].append([int(j) for j in i.split()])
messages = messages_data.split("\n")
def check_messages(messages, rules, depth):
total = 0
    # Rule 0 is built from rules 8 and 11; both are built from 42 and 31, so we just generate those
valid_31 = set()
valid_42 = set()
for rule_set in convert_rule(31, rules):
for rule in rule_set:
valid_31.add(rule)
for rule_set in convert_rule(42, rules):
for rule in rule_set:
valid_42.add(rule)
num_to_valid = {31: valid_31, 42: valid_42}
comb = []
temp = []
    # Generate candidate patterns: some extra 42s followed by balanced 42/31 pairs, up to the given depth
for i in range(1, depth+1):
temp = [42] + temp + [31]
for j in range(1, depth+1):
comb.append([42]*j+temp)
for message in messages:
for valid in comb:
start = 0
for i in valid:
len_valid = len(next(iter(num_to_valid[i])))
if message[start:start+len_valid] not in num_to_valid[i]:
break
start += len_valid
else:
if start == len(message):
total += 1
break
return total
def convert_rule(rule, rules):
converted_rules = []
for rule_set in rules[rule]:
converted = [""]
for r in rule_set:
if r in rules:
new_converted = []
for k in convert_rule(r, rules):
for j in k:
for base in converted:
new_converted.append(base + j)
converted = new_converted
else:
for i in range(len(converted)):
converted[i] += r
converted_rules.append(converted)
return converted_rules
print(f"Part 1: {check_messages(messages, rules,1)}")
#Increase the depth for longer inputs
print(f"Part 2: {check_messages(messages, rules,10)}")
```
#### File: advent_of_code_2020/day_24/main.py
```python
with open("input.txt", "r") as file:
tiles_to_flip = [i for i in file.readlines()]
movement = {"w": (-1, 0),
"e": (1, 0),
"S": (1, -1),
"W": (0, -1),
"E": (0, 1),
"N": (-1, 1)}
black_tiles = set()
for tile in tiles_to_flip:
tile = tile.replace("se", "S").replace("sw", "W").replace(
"nw", "N").replace("ne", "E").replace("\n", "")
x,y = 0,0
for i in tile:
dx,dy = movement[i]
x+=dx
y+=dy
if (x,y) not in black_tiles:
black_tiles.add((x,y))
else:
black_tiles.remove((x,y))
def part2(black_tiles):
for _ in range(100):
black_neighbours = {}
for tile in black_tiles:
x,y = tile
if (x,y) not in black_neighbours:
black_neighbours[(x,y)] = 0
for m in movement.values():
dx, dy = m
dx = dx+x
dy = dy+y
if (dx, dy) not in black_neighbours:
black_neighbours[(dx, dy)] = 0
black_neighbours[(dx, dy)]+=1
new_black = set()
for k,v in black_neighbours.items():
x,y = k
if v==2 and (x, y) not in black_tiles:
new_black.add((x,y))
elif (x,y) in black_tiles:
if v>0 and v<=2:
new_black.add((x,y))
black_tiles = new_black
return len(black_tiles)
print(f"Part 1: {len(black_tiles)}")
print(f"Part 2: {part2(black_tiles)}")
``` |
{
"source": "jmjacquet/IronWeb",
"score": 2
} |
#### File: IronWeb/comprobantes/models.py
```python
from __future__ import unicode_literals
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from django.db import models
from datetime import datetime, date
from dateutil.relativedelta import *
from django.conf import settings
import os
from django.utils.translation import ugettext_lazy as _
from general.utilidades import *
from productos.models import (prod_productos, gral_tipo_iva, prod_ubicacion,
prod_lista_precios, prod_producto_lprecios,
)
from django.db.models import Sum
from decimal import Decimal
from django.utils import timezone
from django.dispatch import receiver
from entidades.models import egr_entidad
class cpb_estado(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
nombre = models.CharField(u'Nombre', max_length=100)
color = models.CharField(u'Color', max_length=200, blank=True, null=True)
tipo = models.IntegerField(
u'Tipo CPB', choices=TIPO_COMPROBANTE, default=1, blank=True, null=True)
class Meta:
db_table = 'cpb_estado'
def __unicode__(self):
return u'%s' % (self.nombre)
class cpb_tipo(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
nombre = models.CharField(_(u'Nombre'), max_length=100)
detalle = models.CharField(
_(u'Detalle'), max_length=100, blank=True, null=True)
codigo = models.CharField(
_(u'Código'), max_length=10, blank=True, null=True)
tipo = models.IntegerField(
_(u'Tipo CPB'), choices=TIPO_COMPROBANTE, default=1, blank=True, null=True)
    # False for comprobantes (cpb) that do not take the punto de venta from the table, but as an integer or copied from the originating comprobante
usa_pto_vta = models.BooleanField(_(u'Usa Pto Venta'), default=False)
    # Used for comprobantes that are common to all, e.g. remito (delivery note), presupuesto (quote), etc.
ultimo_nro = models.PositiveIntegerField(
_(u'Último Nº'), default=0, blank=True, null=True)
usa_forma_pago = models.BooleanField(_(u'Usa FP'), default=True)
signo_forma_pago = models.IntegerField(
_(u'Signo FP'), choices=SIGNO, default=1, blank=True, null=True)
usa_ctacte = models.BooleanField(_(u'Usa Cta.Cte.'), default=True)
signo_ctacte = models.IntegerField(
_(u'Signo Cta.Cte.'), choices=SIGNO, default=1, blank=True, null=True)
usa_stock = models.BooleanField(_(u'Usa Stock'), default=True)
signo_stock = models.IntegerField(
u'Signo Stock', choices=SIGNO, default=1, blank=True, null=True)
compra_venta = models.CharField(
max_length=1, blank=True, null=True, default='V')
libro_iva = models.BooleanField(u'Libro IVA', default=True)
signo_libro_iva = models.IntegerField(
u'Signo Libro IVA', default=1, blank=True, null=True)
facturable = models.BooleanField(u'Facturable', default=True)
baja = models.BooleanField(default=False)
# fecha_creacion = models.DateTimeField(blank=True, null=True)
fecha_modif = models.DateTimeField(blank=True, null=True)
class Meta:
db_table = 'cpb_tipo'
def save(self, *args, **kwargs):
''' On save, update timestamps '''
self.fecha_modif = timezone.now()
return super(cpb_tipo, self).save(*args, **kwargs)
def __unicode__(self):
return u'%s' % (self.nombre)
class cpb_nro_afip(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
cpb_tipo = models.IntegerField(
u'Tipo CPB', choices=TIPO_COMPROBANTE, default=1, blank=True, null=True)
letra = models.CharField(
u'Letra', choices=COMPROB_FISCAL, max_length=1, blank=True, null=True)
numero_afip = models.PositiveSmallIntegerField(
u'Nº AFIP', blank=True, null=True)
class Meta:
db_table = 'cpb_nro_afip'
def __unicode__(self):
return u' %s (%s %s)' % (self.numero_afip, self.get_cpb_tipo_display(), self.letra)
class cpb_pto_vta(models.Model):
id = models.IntegerField(u'Número', primary_key=True, db_index=True)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',
blank=True, null=True, on_delete=models.SET_NULL)
numero = models.IntegerField(u'Número PV')
nombre = models.CharField(u'Nombre Punto Venta',
max_length=50, blank=True, null=True)
es_sucursal = models.BooleanField(u'Es Sucursal', default=False)
domicilio = models.CharField(
'Domicilio', max_length=200, blank=True, null=True)
provincia = models.IntegerField(
'Provincia', choices=PROVINCIAS, blank=True, null=True, default=12)
localidad = models.CharField(
'Localidad', max_length=100, blank=True, null=True)
cod_postal = models.CharField('CP', max_length=50, blank=True, null=True)
email = models.EmailField('Email', blank=True, null=True)
telefono = models.CharField(
u'Teléfono', max_length=50, blank=True, null=True)
celular = models.CharField(
_('Celular'), max_length=50, blank=True, null=True)
baja = models.BooleanField(default=False)
interno = models.BooleanField(default=False)
fecha_creacion = models.DateTimeField(auto_now_add=True)
fecha_modif = models.DateTimeField(auto_now=True)
    # FISCAL DATA
fe_electronica = models.BooleanField(
_(u'Factura Electrónica'), default=False)
leyenda = models.BooleanField(_(u'Agente de Retención'), default=False)
categ_fiscal = models.IntegerField(
_(u'Categoría Fiscal'), choices=CATEG_FISCAL, blank=True, null=True)
cuit = models.CharField('CUIT', max_length=50)
iibb = models.CharField(u'Nº IIBB', max_length=50, blank=True, null=True)
fecha_inicio_activ = models.DateTimeField(
_('Fecha Inicio Actividades'), null=True)
nombre_fantasia = models.CharField(
_(u'Nombre Fantasía'), max_length=200, blank=True, null=True)
ruta_logo = models.CharField(
db_column='ruta_logo', max_length=100, null=True, blank=True)
tipo_logo_factura = models.IntegerField(
_(u'Tipo Logotipo'), choices=TIPO_LOGOTIPO, blank=True, null=True)
fe_crt = models.CharField(_('Nombre Archivo CRT'),
max_length=50, blank=True, null=True)
fe_key = models.CharField(_('Nombre Archivo Key'),
max_length=50, blank=True, null=True)
class Meta:
db_table = 'cpb_pto_vta'
def __unicode__(self):
return u'%s - %s' % ("{num:>05}".format(num=str(self.numero)), self.nombre)
def get_numero(self):
return u'%s' % ("{num:>05}".format(num=str(self.numero)))
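    # e.g. a cpb_pto_vta with numero=3 renders as "00003" via get_numero()
    # and as "00003 - <nombre>" via __unicode__ (illustrative values only)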
class cpb_pto_vta_numero(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
cpb_tipo = models.ForeignKey(
'comprobantes.cpb_tipo', verbose_name=u'Tipo CPB', db_column='cpb_tipo', blank=True, null=True)
letra = models.CharField(
u'Letra', choices=COMPROB_FISCAL, max_length=1, blank=True, null=True)
cpb_pto_vta = models.ForeignKey('comprobantes.cpb_pto_vta', verbose_name=u'Punto Vta',
db_column='cpb_pto_vta', blank=True, null=True, on_delete=models.SET_NULL)
ultimo_nro = models.PositiveIntegerField(
u'Último Nº', default=0, blank=True, null=True)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',
blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'cpb_pto_vta_numero'
def __unicode__(self):
return u'%s %s-%s-%s' % (self.cpb_tipo, self.letra, self.cpb_pto_vta.numero, self.ultimo_nro)
class cpb_comprobante(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
cpb_tipo = models.ForeignKey('comprobantes.cpb_tipo', verbose_name=_(
u'Tipo CPB'), db_column='cpb_tipo', blank=True, null=True)
entidad = models.ForeignKey('entidades.egr_entidad', db_column='entidad', related_name='cpb_entidad',
                                blank=True, null=True, on_delete=models.SET_NULL)  # Client/Supplier
vendedor = models.ForeignKey('entidades.egr_entidad', db_column='vendedor',
related_name='cpb_vendedor', blank=True, null=True, on_delete=models.SET_NULL)
pto_vta = models.IntegerField(blank=True, null=True, db_column='pto_vta')
#pto_vta = models.ForeignKey('cpb_pto_vta',blank=True, null=True,on_delete=models.SET_NULL)
letra = models.CharField(
_(u'Letra'), choices=COMPROB_FISCAL, max_length=1, blank=True, null=True)
numero = models.IntegerField(u'Número', blank=True, null=True)
condic_pago = models.IntegerField(
choices=CONDICION_PAGO, blank=True, null=True, default=1)
fecha_creacion = models.DateTimeField(
auto_now_add=True, blank=True, null=True)
fecha_cpb = models.DateField('Fecha Comprobante')
fecha_imputacion = models.DateField(blank=True, null=True)
fecha_vto = models.DateField(blank=True, null=True)
presup_tiempo_entrega = models.CharField(
u'Tiempo de Entrega', max_length=100, blank=True, null=True)
presup_forma_pago = models.CharField(
u'Forma de Pago', max_length=200, blank=True, null=True)
presup_aprobacion = models.ForeignKey(
'comprobantes.cpb_estado', related_name='presup_estado', blank=True, null=True, on_delete=models.SET_NULL)
cae = models.CharField(u'CAE', max_length=100, blank=True, null=True)
cae_vto = models.DateField('CAE Vto.', blank=True, null=True)
    importe_gravado = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # everything that carries VAT (IVA)
    importe_iva = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # sum of the VAT amounts
    importe_subtotal = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # sum of taxed amount + VAT
    importe_no_gravado = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # sum of non-taxed VAT types
    importe_exento = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # sum of exempt VAT types
    importe_perc_imp = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # sum of perceptions and taxes
    importe_ret = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # sum of withholdings
    importe_total = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # sum of everything
    importe_tasa1 = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # ITC sum (part of the taxed amount)
    importe_tasa2 = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # TH sum (part of the taxed amount)
estado = models.ForeignKey('comprobantes.cpb_estado', related_name='estado',
blank=True, null=True, on_delete=models.SET_NULL)
anulacion_motivo = models.CharField(
u'Motivo Anulación', max_length=200, blank=True, null=True)
anulacion_fecha = models.DateField(blank=True, null=True)
observacion = models.TextField(max_length=500, blank=True, null=True)
id_cpb_padre = models.ForeignKey('comprobantes.cpb_comprobante', db_column='id_cpb_padre',
related_name="cpb_comprobante_padre", blank=True, null=True, on_delete=models.SET_NULL)
saldo = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',
blank=True, null=True, on_delete=models.SET_NULL)
usuario = models.ForeignKey('usuarios.usu_usuario', db_column='usuario', blank=True,
null=True, related_name='usu_usuario_cpb', on_delete=models.SET_NULL)
seguimiento = models.TextField(max_length=500, blank=True, null=True)
fecha_envio_mail = models.DateField(blank=True, null=True)
fecha_recepcion_mail = models.DateField(blank=True, null=True)
cae_observaciones = models.TextField(
max_length=1000, blank=True, null=True)
cae_excepcion = models.TextField(max_length=1000, blank=True, null=True)
cae_traceback = models.TextField(max_length=1000, blank=True, null=True)
cae_xml_request = models.TextField(max_length=1000, blank=True, null=True)
cae_xml_response = models.TextField(max_length=1000, blank=True, null=True)
cae_errores = models.TextField(max_length=1000, blank=True, null=True)
class Meta:
db_table = 'cpb_comprobante'
ordering = ['-fecha_cpb', '-pto_vta',
'-letra', '-id_cpb_padre__pk', '-numero']
def __unicode__(self):
return u'%s-%s-%s' % ("{num:>05}".format(num=str(self.pto_vta)), self.letra, "{num:>08}".format(num=str(self.numero)))
@property
def get_cpb(self):
return u'%s-%s-%s' % ("{num:>05}".format(num=str(self.pto_vta)), self.letra, "{num:>08}".format(num=str(self.numero)))
def get_pto_vta(self):
try:
pv = cpb_pto_vta.objects.get(
numero=self.pto_vta, empresa=self.empresa)
except:
return None
return pv
@property
def estado_cpb(self):
        # If it is a presupuesto (quote), check whether it has expired
if self.cpb_tipo.tipo == 6:
if (self.fecha_vto <= timezone.now().date()) and (self.estado.pk < 12):
e = cpb_estado.objects.get(pk=11)
else:
e = self.estado
else:
e = self.estado
return e
@property
def estado_color(self):
if self.estado:
return self.estado.color
@property
def seleccionable(self):
if self.cpb_tipo.compra_venta == 'V':
return (self.estado.id in [1, 2]) and not(self.cae and (self.estado.id == 2))
elif self.cpb_tipo.compra_venta == 'C':
return (self.estado.id in [1, 2])
@property
def vencimiento_cpb(self):
if self.fecha_vto:
if (self.fecha_vto <= timezone.now().date()):
e = cpb_estado.objects.get(pk=11)
else:
e = self.estado
else:
e = self.estado
return e
def get_nro_afip(self):
try:
c = cpb_nro_afip.objects.get(
cpb_tipo=self.cpb_tipo.tipo, letra=self.letra)
except:
return None
return c.numero_afip
def get_numero(self):
return '%s-%s' % ("{num:>05}".format(num=str(self.pto_vta)), "{num:>08}".format(num=str(self.numero)))
@property
def get_cpb_tipo(self):
return u'%s: %s-%s-%s ' % (self.cpb_tipo, "{num:>05}".format(num=str(self.pto_vta)), self.letra, "{num:>08}".format(num=str(self.numero)))
def get_cobranzas(self):
cobranzas = cpb_cobranza.objects.filter(cpb_comprobante=self, cpb_comprobante__estado__pk__lt=3).select_related(
'cpb_factura', 'cpb_factura__cpb_tipo', 'cpb_comprobante')
return list(cobranzas)
def tiene_cobranzas(self):
return cpb_cobranza.objects.filter(cpb_factura=self).count() > 0
def tiene_cobranzasREC_OP(self):
return cpb_cobranza.objects.filter(cpb_comprobante=self).count() > 0
def get_listado(self):
if self.cpb_tipo.pk in [1, 3, 5, 14, 20, 23]:
return reverse('cpb_venta_listado')
elif self.cpb_tipo.pk in [2, 4, 6, 18, 19]:
return reverse('cpb_compra_listado')
elif self.cpb_tipo.pk in [8]:
return reverse('cpb_remito_listado')
elif self.cpb_tipo.pk in [9]:
return reverse('cpb_remitoc_listado')
elif self.cpb_tipo.pk in [7]:
return reverse('cpb_rec_cobranza_listado')
elif self.cpb_tipo.pk in [12]:
return reverse('cpb_pago_listado')
elif self.cpb_tipo.pk == 11:
return reverse('cpb_presup_listado')
elif self.cpb_tipo.pk in [13]:
return reverse('movimientos_listado')
else:
return reverse('principal')
def get_importe_total(self):
signo = self.cpb_tipo.signo_ctacte
if not self.importe_total:
return 0
if signo:
return self.importe_total * signo
else:
return self.importe_total
def get_importe_subtotal(self):
signo = self.cpb_tipo.signo_ctacte
if not self.importe_subtotal:
return 0
if signo:
return self.importe_subtotal * signo
else:
return self.importe_subtotal
def get_importe_iva(self):
signo = self.cpb_tipo.signo_ctacte
if not self.importe_iva:
return 0
if signo:
return self.importe_iva * signo
else:
return self.importe_iva
    def get_saldo(self):
        signo = self.cpb_tipo.signo_ctacte
        if not self.saldo:
            return 0
        if signo:
            return self.saldo * signo
        else:
            return self.saldo
def get_importe_gravado(self):
signo = self.cpb_tipo.signo_ctacte
if not self.importe_gravado:
return 0
if signo:
return self.importe_gravado * signo
else:
return self.importe_gravado
def get_importe_no_gravado(self):
signo = self.cpb_tipo.signo_ctacte
if not self.importe_no_gravado:
return 0
if signo:
return self.importe_no_gravado * signo
else:
return self.importe_no_gravado
def get_importe_exento(self):
signo = self.cpb_tipo.signo_ctacte
if not self.importe_exento:
return 0
if signo:
return self.importe_exento * signo
else:
return self.importe_exento
def get_importe_perc_imp(self):
signo = self.cpb_tipo.signo_ctacte
if not self.importe_perc_imp:
return 0
if signo:
return self.importe_perc_imp * signo
else:
return self.importe_perc_imp
class cpb_comprobante_detalle(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
cpb_comprobante = models.ForeignKey('comprobantes.cpb_comprobante', verbose_name=u'CPB',
db_column='cpb_comprobante', blank=True, null=True, on_delete=models.CASCADE)
producto = models.ForeignKey('productos.prod_productos', db_column='producto', related_name='producto',
                                 blank=True, null=True, on_delete=models.SET_NULL)
cantidad = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=1)
tasa_iva = models.ForeignKey('productos.gral_tipo_iva', verbose_name='Tasa IVA',
db_column='tasa_iva', blank=True, null=True, on_delete=models.SET_NULL)
coef_iva = models.DecimalField(
max_digits=5, decimal_places=3, default=0, blank=True, null=True)
lista_precios = models.ForeignKey('productos.prod_lista_precios', db_column='lista_precios',
                                     related_name='cpb_lista_precios', blank=True, null=True, on_delete=models.SET_NULL)
importe_costo = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
importe_unitario = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
porc_dcto = models.DecimalField(
max_digits=5, decimal_places=2, blank=True, null=True, default=0)
importe_subtotal = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
importe_iva = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
importe_total = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
importe_tasa1 = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
importe_tasa2 = models.DecimalField(
        max_digits=15, decimal_places=2, blank=True, null=True, default=0)  # Tasa Hídrica (water levy)
origen_destino = models.ForeignKey('productos.prod_ubicacion', verbose_name='Origen/Destino',
db_column='origen_destino', blank=True, null=True, on_delete=models.SET_NULL)
fecha_creacion = models.DateTimeField(auto_now_add=True)
detalle = models.TextField(max_length=500, blank=True, null=True)
class Meta:
db_table = 'cpb_comprobante_detalle'
def __unicode__(self):
return u'%s-%s' % (self.producto, self.cantidad)
def get_precio_unit_iva(self):
# return self.importe_unitario * (1+self.coef_iva)
return self.importe_unitario
def get_movim_stock(self):
return self.cantidad * self.cpb_comprobante.cpb_tipo.signo_stock
@property
def get_costo_total(self):
return self.cantidad*self.importe_costo
@property
def get_utilidad_total(self):
return (self.importe_subtotal-(self.cantidad*self.importe_costo))
@property
def get_itc(self):
try:
lpl = prod_producto_lprecios.objects.get(
producto=self.producto, lista_precios=self.lista_precios)
except:
return None
return lpl.precio_itc
@property
def get_th(self):
try:
lpl = prod_producto_lprecios.objects.get(
producto=self.producto, lista_precios=self.lista_precios)
except:
return None
return lpl.precio_tasa
class cpb_perc_imp(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
nombre = models.CharField(u'Nombre', max_length=100)
descripcion = models.CharField(
u'Descripción', max_length=200, blank=True, null=True)
codigo = models.CharField(u'Código', max_length=2, blank=True, null=True)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',
blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'cpb_perc_imp'
def __unicode__(self):
return u'%s' % (self.nombre)
class cpb_comprobante_perc_imp(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
cpb_comprobante = models.ForeignKey('comprobantes.cpb_comprobante', verbose_name=u'CPB',
db_column='cpb_comprobante', blank=True, null=True, on_delete=models.CASCADE)
perc_imp = models.ForeignKey('comprobantes.cpb_perc_imp', db_column='perc_imp',
        blank=True, null=True, on_delete=models.SET_NULL)
detalle = models.TextField(max_length=500, blank=True, null=True)
importe_total = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
class Meta:
db_table = 'cpb_comprobante_perc_imp'
def __unicode__(self):
return u'%s-%s' % (self.perc_imp, self.importe_total)
class cpb_retenciones(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
nombre = models.CharField(u'Nombre', max_length=100)
descripcion = models.CharField(
u'Descripción', max_length=200, blank=True, null=True)
codigo = models.CharField(u'Código', max_length=2, blank=True, null=True)
grupo = models.IntegerField(
'Grupo', choices=TIPO_RETENCIONES, blank=True, null=True, default=1)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',
blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'cpb_retenciones'
def __unicode__(self):
return u'%s' % (self.nombre)
class cpb_comprobante_retenciones(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
cpb_comprobante = models.ForeignKey('comprobantes.cpb_comprobante', verbose_name=u'CPB',
db_column='cpb_comprobante', blank=True, null=True, on_delete=models.CASCADE)
retencion = models.ForeignKey('comprobantes.cpb_retenciones',
db_column='retencion', blank=True, null=True, on_delete=models.SET_NULL)
    # Number of the withholding certificate received.
ret_nrocpb = models.CharField(
verbose_name=u'Nº Certif. Retención', max_length=30, blank=True, null=True)
    # Net amount subject to the withholding.
ret_importe_isar = models.DecimalField(
verbose_name=u'Importe Sujeto a Retención', max_digits=15, decimal_places=2, blank=True, null=True, default=0)
    # Date of the withholding certificate received.
ret_fecha_cpb = models.DateField(
verbose_name=u'Fecha Retención', blank=True, null=True)
detalle = models.TextField(max_length=500, blank=True, null=True)
    # Amount withheld.
importe_total = models.DecimalField(
u'Importe Retenido', max_digits=15, decimal_places=2, blank=True, null=True, default=0)
class Meta:
db_table = 'cpb_comprobante_retenciones'
def __unicode__(self):
return u'%s-%s' % (self.retencion, self.importe_total)
class cpb_comprobante_tot_iva(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
cpb_comprobante = models.ForeignKey('comprobantes.cpb_comprobante', verbose_name=u'CPB',
db_column='cpb_comprobante', blank=True, null=True, on_delete=models.CASCADE)
importe_base = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
tasa_iva = models.ForeignKey(
'productos.gral_tipo_iva', verbose_name='Tasa IVA', db_column='cpb_tasa_iva', blank=True, null=True)
importe_total = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
class Meta:
db_table = 'cpb_comprobante_tot_iva'
def __unicode__(self):
return u'%s-%s' % (self.tasa_iva, self.importe_total)
@property
def get_iva(self):
return u'%s' % (self.tasa_iva.coeficiente*100)
class cpb_cobranza(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
    # ID of the receipt (RECIBO) or payment order (ORDEN PAGO)
cpb_comprobante = models.ForeignKey('comprobantes.cpb_comprobante', verbose_name=u'CPB', related_name='cpb_cobranza_cpb',
db_column='cpb_comprobante', blank=True, null=True, on_delete=models.CASCADE)
    # ID of the invoice being settled: a purchase invoice (for the ORDEN PAGO) or a sale invoice (for the RECIBO)
cpb_factura = models.ForeignKey('comprobantes.cpb_comprobante', verbose_name=u'Factura',
related_name='cpb_cobranza_factura', db_column='cpb_factura', blank=True, null=True, on_delete=models.CASCADE)
importe_total = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
    # Discount or surcharge applied to the invoice
desc_rec = models.DecimalField(
max_digits=15, decimal_places=2, blank=True, null=True, default=0)
fecha_creacion = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = 'cpb_cobranza'
def __unicode__(self):
return u'%s-%s-$ %s' % (self.cpb_comprobante, self.cpb_factura, self.importe_total)
class cpb_banco(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
codigo = models.CharField(u'Código', max_length=20, blank=True, null=True)
nombre = models.CharField(u'Nombre', max_length=100)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',
blank=True, null=True, on_delete=models.SET_NULL)
baja = models.BooleanField(default=False)
class Meta:
db_table = 'cpb_banco'
def __unicode__(self):
return u'%s - %s' % (self.codigo, self.nombre)
class cpb_cuenta(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
codigo = models.CharField(u'Código', max_length=100, blank=True, null=True)
nombre = models.CharField(u'Nombre', max_length=100, blank=True, null=True)
nro_cuenta_bancaria = models.CharField(
u'CBU', max_length=100, blank=True, null=True)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',
blank=True, null=True, on_delete=models.SET_NULL)
tipo = models.IntegerField(
u'Tipo Cuenta', choices=TIPO_CTA_DISPO, default=0, blank=True, null=True)
tipo_forma_pago = models.ForeignKey('comprobantes.cpb_tipo_forma_pago', db_column='tipo_forma_pago',
related_name='tipo_fp', blank=True, null=True, on_delete=models.SET_NULL)
modificable = models.BooleanField(default=True)
baja = models.BooleanField(default=False)
banco = models.ForeignKey('comprobantes.cpb_banco', db_column='banco',
related_name='cuenta_banco', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'cpb_cuenta'
def __unicode__(self):
return u'%s - %s' % (self.codigo, self.nombre)
class cpb_tipo_forma_pago(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
codigo = models.CharField(u'Código', max_length=10, blank=True, null=True)
nombre = models.CharField(u'Nombre', max_length=200, blank=True, null=True)
signo = models.IntegerField(
u'Signo Cta.Cte.', choices=SIGNO, default=1, blank=True, null=True)
cuenta = models.ForeignKey('comprobantes.cpb_cuenta', db_column='cuenta',
related_name='fp_cuenta', blank=True, null=True, on_delete=models.SET_NULL)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',
blank=True, null=True, on_delete=models.SET_NULL)
baja = models.BooleanField(default=False)
class Meta:
db_table = 'cpb_tipo_forma_pago'
def __unicode__(self):
return u'%s - %s' % (self.codigo, self.nombre)
class cpb_comprobante_fp(models.Model):
id = models.AutoField(primary_key=True, db_index=True)
cpb_comprobante = models.ForeignKey('comprobantes.cpb_comprobante', verbose_name=u'CPB',
db_column='cpb_comprobante', blank=True, null=True, on_delete=models.CASCADE)
tipo_forma_pago = models.ForeignKey('comprobantes.cpb_tipo_forma_pago', db_column='tipo_forma_pago',
related_name='tipo_forma_pago', blank=True, null=True, on_delete=models.SET_NULL)
cta_egreso = models.ForeignKey('comprobantes.cpb_cuenta', db_column='cta_egreso',
related_name='cta_egreso', blank=True, null=True, on_delete=models.SET_NULL)
cta_ingreso = models.ForeignKey('comprobantes.cpb_cuenta', db_column='cta_ingreso',
related_name='cta_ingreso', blank=True, null=True, on_delete=models.SET_NULL)
mdcp_fecha = models.DateField('Fecha', blank=True, null=True)
mdcp_banco = models.ForeignKey('comprobantes.cpb_banco', verbose_name='Banco',
db_column='mdcp_banco', blank=True, null=True, on_delete=models.SET_NULL)
mdcp_cheque = models.CharField(
u'Cheque Nº', max_length=50, blank=True, null=True)
importe = models.DecimalField(
'Importe', max_digits=15, decimal_places=2, blank=True, null=True, default=0)
detalle = models.TextField(max_length=500, blank=True, null=True)
fecha_creacion = models.DateTimeField(auto_now_add=True)
mdcp_salida = models.ForeignKey('comprobantes.cpb_comprobante_fp', db_column='mdcp_salida',
related_name='fp_mov_salida', blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'cpb_comprobante_fp'
def __unicode__(self):
descr = u'%s $ %s' % (self.tipo_forma_pago.nombre, self.importe)
if self.mdcp_banco and self.mdcp_fecha:
descr = u'%s - %s %s %s %s' % (descr, datetime.strftime(
self.mdcp_fecha, "%d/%m/%Y"), self.mdcp_banco, self.mdcp_cheque, self.detalle)
return descr
def get_selCheque(self):
if self.mdcp_cheque:
nro = u' | Nº: %s' % (self.mdcp_cheque)
else:
nro = ''
        try:
            fecha = u' | Vencimiento: %s' % (
                datetime.strftime(self.mdcp_fecha, "%d/%m/%Y"))
        except Exception:
            fecha = ''
        try:
            banco = ' | ' + str(self.mdcp_banco)
        except Exception:
            banco = ''
        try:
            descr = ' | ' + self.detalle
        except Exception:
            descr = ''
return u'$ %s%s%s%s%s' % (self.importe, nro, fecha, banco, descr)
    def get_estadoCheque(self):
        estado = ''
        if self.cta_ingreso:
            if (not self.cta_egreso) and (self.cta_ingreso.tipo == 2) and (not self.mdcp_salida):
                estado = 'EN CARTERA'
            elif ((not self.cta_egreso) and (self.cta_ingreso.tipo == 2) and self.mdcp_salida
                  and (self.cpb_comprobante.cpb_tipo.tipo == 4)
                  and (self.mdcp_salida.cpb_comprobante.cpb_tipo.tipo == 8)):
                estado = 'COBRO/DEPOSITO'
            elif (not self.cta_egreso) and self.mdcp_salida:
                estado = 'UTILIZADO'
        elif self.cta_egreso:
            if (self.cta_egreso.tipo == 2) and (not self.mdcp_salida):
                estado = 'PAGADO/DIFERIDO'
        return estado
    def _get_origen(self):
        cpb = cpb_comprobante_fp.objects.filter(mdcp_salida__id=self.id).first()
        if cpb:
            return cpb.cpb_comprobante
        return None
    get_origen = property(_get_origen)
######################################################################################################
def recalcular_saldo_cpb(idCpb): # pragma: no cover
cpb = cpb_comprobante.objects.get(pk=idCpb)
    # Recalculate the comprobante's totals
importe_no_gravado = 0
importe_exento = 0
importe_gravado = 0
importe_iva = 0
importe_subtotal = 0
importe_total = 0
tot_perc_imp = 0
importe_tasa1 = 0
importe_tasa2 = 0
    # Standalone collections and payments do not recalculate IVA, detail lines, etc.
if cpb.cpb_tipo.tipo in [4, 7, 8]:
cpb.importe_gravado = importe_gravado
cpb.importe_iva = importe_iva
cpb.importe_no_gravado = importe_no_gravado
cpb.importe_exento = importe_exento
cpb.importe_perc_imp = 0
cpb.saldo = 0
if cpb.cpb_tipo.tipo in [5]:
cpb.importe_subtotal = importe_gravado + importe_no_gravado + importe_exento
cpb.importe_total = importe_subtotal + tot_perc_imp + importe_iva
cpb.save()
elif cpb.cpb_tipo.tipo in [1, 2, 3, 6, 9, 14, 21, 22, 23]:
cpb_detalles = cpb_comprobante_detalle.objects.filter(
cpb_comprobante=cpb)
for c in cpb_detalles:
if c.tasa_iva:
if c.tasa_iva.pk == 1:
importe_no_gravado = importe_no_gravado + c.importe_subtotal
elif c.tasa_iva.pk == 2:
importe_exento = importe_exento + c.importe_subtotal
else:
importe_gravado = importe_gravado + c.importe_subtotal
else:
importe_gravado = importe_gravado + c.importe_subtotal
importe_iva += c.importe_iva
if cpb.empresa.usa_impuestos:
if c.importe_tasa1:
importe_tasa1 += c.importe_tasa1
if c.importe_tasa2:
importe_tasa2 += c.importe_tasa2
        try:
            tot_perc_imp = cpb_comprobante_perc_imp.objects.filter(
                cpb_comprobante=cpb).aggregate(sum=Sum('importe_total'))['sum']
        except Exception:
            tot_perc_imp = 0
if not tot_perc_imp:
tot_perc_imp = 0
if (cpb.cpb_tipo.compra_venta == 'V') and cpb.empresa.usa_impuestos:
cpb.importe_tasa1 = importe_tasa1
cpb.importe_tasa2 = importe_tasa2
if cpb.empresa.usa_impuestos:
            # Non-taxed levies are added to the non-taxed (no gravado) amount
importe_no_gravado = importe_no_gravado + cpb.importe_tasa1 + cpb.importe_tasa2
importe_subtotal = importe_gravado + importe_no_gravado + importe_exento
importe_total = importe_subtotal + tot_perc_imp + importe_iva
cpb.importe_gravado = importe_gravado
cpb.importe_iva = importe_iva
cpb.importe_subtotal = importe_subtotal
cpb.importe_no_gravado = importe_no_gravado
cpb.importe_exento = importe_exento
cpb.importe_perc_imp = tot_perc_imp
cpb.importe_total = importe_total
        # For purchases, keep whatever was originally loaded on the comprobante
        # Active collections/payments for the sale/purchase comprobante
cobranzas = cpb_cobranza.objects.filter(
cpb_factura=cpb, cpb_comprobante__estado__pk__lt=3).aggregate(sum=Sum('importe_total'))
importes = cobranzas['sum']
if not importes:
total = cpb.importe_total
else:
            # Apply the cta.cte. sign
if cpb.cpb_tipo.usa_ctacte:
total = (cpb.importe_total - Decimal(importes)
* cpb.cpb_tipo.signo_ctacte)
else:
total = (cpb.importe_total - Decimal(importes))
cpb.saldo = total
estado = cpb_estado.objects.get(pk=1)
if (total == 0) and (cpb.estado.pk < 3):
estado = cpb_estado.objects.get(pk=2)
cpb.estado = estado
cpb.save()
        # Rebuild the per-comprobante IVA totals
cpb_comprobante_tot_iva.objects.filter(cpb_comprobante=cpb).delete()
coeficientes = cpb_detalles.filter(tasa_iva__id__gt=2).values('tasa_iva').annotate(
importe_total=Sum('importe_iva'), importe_base=Sum('importe_subtotal'))
for cc in coeficientes:
tasa = gral_tipo_iva.objects.get(pk=cc['tasa_iva'])
cpb_ti = cpb_comprobante_tot_iva(
cpb_comprobante=cpb, tasa_iva=tasa, importe_total=cc['importe_total'], importe_base=cc['importe_base'])
cpb_ti.save()
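# Worked example (hypothetical figures, not part of the original module): a sale
# invoice with importe_total = 1210 and one active receipt for 210, where
# cpb_tipo.usa_ctacte is True and signo_ctacte is 1, is left with
#     saldo = 1210 - 210 * 1 = 1000
# and keeps estado pk=1; once receipts cover the full 1210 the saldo reaches 0
# and recalcular_saldo_cpb() switches it to estado pk=2.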
def recalcular_saldos_cobranzas(idCpb): # pragma: no cover
cpb = cpb_comprobante.objects.get(pk=idCpb)
importe_no_gravado = 0
importe_exento = 0
importe_gravado = 0
importe_iva = 0
importe_subtotal = 0
importe_total = 0
tot_perc_imp = 0
    try:
        tot_perc_imp = cpb_comprobante_perc_imp.objects.filter(
            cpb_comprobante=cpb).aggregate(sum=Sum('importe_total'))['sum']
    except Exception:
        tot_perc_imp = 0
if not tot_perc_imp:
tot_perc_imp = 0
importe_subtotal = importe_gravado + importe_no_gravado + importe_exento
importe_total = importe_subtotal + tot_perc_imp + importe_iva
cpb.importe_gravado = importe_gravado
cpb.importe_iva = importe_iva
cpb.importe_subtotal = importe_subtotal
cpb.importe_no_gravado = importe_no_gravado
cpb.importe_exento = importe_exento
cpb.importe_perc_imp = tot_perc_imp
cpb.importe_total = importe_total
    # Active collections/payments for the sale/purchase comprobante
cobranzas = cpb_cobranza.objects.filter(
cpb_comprobante=cpb, cpb_comprobante__estado__pk__lt=3).aggregate(sum=Sum('importe_total'))
importes = cobranzas['sum']
if not importes:
total = cpb.importe_total
else:
        # Apply the cta.cte. sign
if cpb.cpb_tipo.usa_ctacte:
total = (cpb.importe_total - Decimal(importes)
* cpb.cpb_tipo.signo_ctacte)
else:
total = (cpb.importe_total - Decimal(importes))
cpb.saldo = total
estado = cpb_estado.objects.get(pk=1)
if (total == 0) and (cpb.estado.pk < 3):
estado = cpb_estado.objects.get(pk=2)
cpb.estado = estado
cpb.save()
def ultimoNro(tipoCpb, ptoVenta, letra, entidad=None):
try:
tipo = cpb_tipo.objects.get(id=tipoCpb)
        if tipo.usa_pto_vta:
pv = cpb_pto_vta.objects.get(numero=ptoVenta.numero)
pventa_tipoCpb, created = cpb_pto_vta_numero.objects.get_or_create(
cpb_tipo=tipo, letra=letra, cpb_pto_vta=pv, empresa=pv.empresa)
if created:
pventa_tipoCpb.ultimo_nro = 1
pventa_tipoCpb.save()
return 1
nro = pventa_tipoCpb.ultimo_nro + 1
else:
nro = 1
if entidad:
entidad = egr_entidad.objects.get(id=entidad)
ult_cpb = cpb_comprobante.objects.filter(entidad=entidad, cpb_tipo=tipo, letra=letra, pto_vta=int(
ptoVenta), empresa=entidad.empresa).order_by('numero').last()
if ult_cpb:
nro = ult_cpb.numero + 1
else:
nro = tipo.ultimo_nro + 1
return nro
    except Exception:
        # Fallback: reuse the comprobante type's last recorded number
        tipo = cpb_tipo.objects.get(id=tipoCpb)
        nro = tipo.ultimo_nro
        return nro
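# Illustrative sketch of the numbering rules above (tipo_factura and
# tipo_interno are hypothetical cpb_tipo rows; the pto_vta/letra counter is
# assumed to sit at ultimo_nro = 41):
#     ultimoNro(tipo_factura, pv, 'A')            -> 42 (per pto_vta/letra counter)
#     ultimoNro(tipo_interno, 0, 'X', entidad=e)  -> e's last numero + 1 for that
#                                                    tipo/letra, when one exists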
def actualizar_stock(request, producto, ubicacion, id_tipo_cpb, cantidad):
estado = cpb_estado.objects.get(pk=2)
tipo_cpb = cpb_tipo.objects.get(pk=id_tipo_cpb)
# pv=cpb_pto_vta.objects.get(pk=-1)
pv = 0
recibo = cpb_comprobante(cpb_tipo=tipo_cpb, estado=estado, pto_vta=pv, letra="X", numero='{0:0{width}}'.format((ultimoNro(id_tipo_cpb, pv, "X")+1), width=8), fecha_cpb=hoy(
), fecha_imputacion=hoy(), importe_iva=None, importe_total=None, usuario=usuario_actual(request), fecha_vto=hoy(), empresa=empresa_actual(request))
recibo.save()
detalle = cpb_comprobante_detalle(cpb_comprobante=recibo, producto=producto, cantidad=cantidad, tasa_iva=producto.tasa_iva, coef_iva=producto.tasa_iva.coeficiente,
origen_destino=ubicacion, detalle=u'ACTUALIZACIÓN DE STOCK')
detalle.save()
def actualizar_stock_multiple(request, prods, id_tipo_cpb, cantidad):
estado = cpb_estado.objects.get(pk=2)
tipo_cpb = cpb_tipo.objects.get(pk=id_tipo_cpb)
# pv=cpb_pto_vta.objects.get(pk=-1)
pv = 0
recibo = cpb_comprobante(cpb_tipo=tipo_cpb, estado=estado, pto_vta=pv, letra="X", numero='{0:0{width}}'.format((ultimoNro(id_tipo_cpb, pv, "X")+1), width=8), fecha_cpb=hoy(
), fecha_imputacion=hoy(), importe_iva=None, importe_total=None, usuario=usuario_actual(request), fecha_vto=hoy(), empresa=empresa_actual(request))
recibo.save()
for p in prods:
detalle = cpb_comprobante_detalle(cpb_comprobante=recibo, producto=p.producto, cantidad=cantidad, tasa_iva=p.producto.tasa_iva, coef_iva=p.producto.tasa_iva.coeficiente,
origen_destino=p.ubicacion, detalle=u'ACTUALIZACIÓN DE STOCK')
detalle.save()
@receiver(post_save, sender=cpb_comprobante, dispatch_uid="actualizar_ultimo_nro")
def actualizar_ultimo_nro(sender, instance, created, **kwargs):
if created:
letra = instance.letra
tipo = instance.cpb_tipo
numero = instance.numero
        if tipo.usa_pto_vta:
pventa = cpb_pto_vta.objects.get(
numero=instance.pto_vta, empresa=instance.empresa)
pventa_tipoCpb, created = cpb_pto_vta_numero.objects.get_or_create(
cpb_tipo=tipo, letra=letra, cpb_pto_vta=pventa, empresa=instance.empresa)
if pventa_tipoCpb.ultimo_nro <= numero:
pventa_tipoCpb.ultimo_nro += 1
pventa_tipoCpb.save()
else:
if not tipo.facturable:
tipo.ultimo_nro = numero
tipo.save()
@receiver(post_save, sender=cpb_cobranza, dispatch_uid="actualizar_cobranza")
@receiver(post_delete, sender=cpb_cobranza, dispatch_uid="actualizar_cobranza")
def actualizar_cobranza(sender, instance, **kwargs):
if instance:
if instance.cpb_factura:
recalcular_saldo_cpb(instance.cpb_factura.pk)
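# Minimal sketch of the signal chain (hypothetical objects): saving a
# cpb_cobranza row that links a receipt to an invoice fires actualizar_cobranza,
# which re-runs recalcular_saldo_cpb on the invoice so saldo/estado stay in sync:
#     cpb_cobranza(cpb_comprobante=recibo, cpb_factura=factura,
#                  importe_total=Decimal('100')).save()
#     # post_save -> actualizar_cobranza -> recalcular_saldo_cpb(factura.pk)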
```
#### File: IronWeb/comprobantes/views.py
```python
from django.template import RequestContext,Context
from django.shortcuts import render, redirect, get_object_or_404,render_to_response,HttpResponseRedirect,HttpResponse
from django.template.loader import render_to_string,get_template
from django.views.generic import TemplateView,ListView,CreateView,UpdateView,FormView,DetailView
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.db import connection
from datetime import datetime,date,timedelta
from django.utils import timezone
from dateutil.relativedelta import *
from .forms import MovimCuentasForm,BancosForm,MovimCuentasFPForm,PercImpForm,FormaPagoForm,PtoVtaForm,DispoForm,SeguimientoForm,FormCheques,FormChequesCobro,PtoVtaEditForm,RetencForm
from django.http import HttpResponseRedirect,HttpResponseForbidden,HttpResponse
from django.db.models import Q,Sum,Count,F,DecimalField
from .models import *
import json
import random
from decimal import *
from modal.views import AjaxCreateView,AjaxUpdateView,AjaxDeleteView
from django.contrib import messages
from general.utilidades import *
from general.models import gral_empresa
from general.views import VariablesMixin,getVariablesMixin
from usuarios.views import tiene_permiso
from django.forms.models import inlineformset_factory,BaseInlineFormSet,modelformset_factory
from productos.models import prod_productos,prod_producto_ubicac,prod_producto_lprecios
from django.contrib.messages.views import SuccessMessageMixin
from django.core.serializers.json import DjangoJSONEncoder
from general.forms import ConsultaCpbs,pto_vta_habilitados,pto_vta_habilitados_list,ConsultaCpbsCompras
from django.utils.functional import curry
from django.forms.models import model_to_dict
@login_required
def recalcular_precios(request):
detalles = cpb_comprobante_detalle.objects.filter(cpb_comprobante__cpb_tipo__tipo__in=[1,2,3,9,14,21,22,23],cpb_comprobante__cpb_tipo__usa_stock=True)
for c in detalles:
lp = prod_producto_lprecios.objects.get(producto=c.producto,lista_precios=c.lista_precios)
c.importe_costo = lp.precio_costo
c.save()
return HttpResponseRedirect(reverse('principal'))
@login_required
def recalcular_cpbs(request):
comprobantes = cpb_comprobante.objects.all()
for c in comprobantes:
recalcular_saldo_cpb(c.id)
return HttpResponseRedirect(reverse('principal'))
@login_required
def recalcular_cobranzas(request):
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo__in = [4,7,8])
for c in comprobantes:
recalcular_saldos_cobranzas(c.id)
return HttpResponseRedirect(reverse('principal'))
@login_required
def eliminar_detalles_fp_huerfanos(request):
empresa = empresa_actual(request)
ids = cpb_comprobante.objects.all().values_list('id',flat=True)
ids = [int(x) for x in ids]
detalles = cpb_comprobante_detalle.objects.filter(cpb_comprobante__empresa=empresa).exclude(cpb_comprobante__id__in=ids).values_list('cpb_comprobante',flat=True)
# for c in detalles
# recalcular_saldo_cpb(c.id)
return HttpResponse( json.dumps(list(detalles), cls=DjangoJSONEncoder), content_type='application/json' )
@login_required
def recalcular_compras(request):
    usr = request.user
    try:
        empresa = usr.userprofile.id_usuario.empresa
    except gral_empresa.DoesNotExist:
        empresa = None
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo__in=[1,2,3,9,21,22,23],cpb_tipo__compra_venta='C',empresa=empresa).order_by('-fecha_cpb','-id','-fecha_creacion')
for c in comprobantes:
recalcular_saldo_cpb(c.id)
return HttpResponseRedirect(reverse('cpb_compra_listado'))
@login_required
def recalcular_presupuestos(request):
    usr = request.user
    try:
        empresa = usr.userprofile.id_usuario.empresa
    except gral_empresa.DoesNotExist:
        empresa = None
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo__in=[6],cpb_tipo__compra_venta='V',empresa=empresa).order_by('-fecha_cpb','-id','-fecha_creacion')
for c in comprobantes:
recalcular_saldo_cpb(c.id)
return HttpResponseRedirect(reverse('cpb_presup_listado'))
def puedeEditarCPB(cpb):
    # Editable only while pending (estado < 3); invoices/NC/ND must be unpaid
    # (saldo equals the full amount) and facturable CPBs must not yet have a CAE
    puede = (cpb.estado.id < 3)
    if cpb.cpb_tipo.tipo not in [4, 7]:
        puede = (cpb.importe_total == cpb.saldo) and puede
    if cpb.cpb_tipo.facturable:
        puede = not (cpb.cae) and puede
    return puede
def puedeEliminarCPB(cpb):
    # Deletable up to estado 3; invoices/NC/ND must be unpaid and must not have a CAE
    puede = (cpb.estado.id <= 3)
    if cpb.cpb_tipo.tipo not in [4, 7]:
        puede = (cpb.importe_total == cpb.saldo) and puede
    puede = (not (cpb.cae)) and puede
    return puede
def comprobantes_con_saldo(tipo):
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo=tipo,saldo__gt=0).order_by('-fecha_cpb','-fecha_creacion')
return comprobantes
def saldo_cpb(idCpb):
cpb=cpb_comprobante.objects.get(pk=idCpb)
    # receipts/payment orders where this cpb is the parent invoice
cobranzas = cpb_cobranza.objects.filter(cpb_factura=cpb,cpb_comprobante__estado__pk__lt=3).aggregate(sum=Sum('importe_total'))
importes = cobranzas['sum']
if not importes:
return cpb.importe_total
else:
return (cpb.importe_total - Decimal(importes))
def cobros_cpb(idCpb):
cpb=cpb_comprobante.objects.get(pk=idCpb)
    # receipts/payment orders where this cpb is the parent invoice
cobranzas = cpb_cobranza.objects.filter(cpb_factura=cpb).aggregate(sum=Sum('importe_total'))
importes = cobranzas['sum']
return importes
def obtener_stock(prod_ubi):
    # Stock = signed sum of quantities on active, stock-moving comprobante lines
    # for this product at this location
    total_stock = cpb_comprobante_detalle.objects.filter(
        cpb_comprobante__estado__in=[1, 2],
        cpb_comprobante__cpb_tipo__usa_stock=True,
        cpb_comprobante__empresa__id=prod_ubi.ubicacion.empresa.id,
        producto__id=prod_ubi.producto.id,
        origen_destino__id=prod_ubi.ubicacion.id,
    ).aggregate(total=Sum(F('cantidad') * F('cpb_comprobante__cpb_tipo__signo_stock'),
                          output_field=DecimalField()))['total'] or 0
    return total_stock
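# Worked example (hypothetical figures): two active lines for the same product
# and location, a purchase (cantidad=10, signo_stock=+1) and a sale
# (cantidad=3, signo_stock=-1), yield
#     obtener_stock(prod_ubi) == 10 * 1 + 3 * (-1) == 7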
@login_required
def buscarDatosProd(request):
try:
prod= {}
idProd = request.GET.get('idp', '')
idubi = request.GET.get('idubi', None)
idlista = request.GET.get('idlista', None)
p = None
coeficiente = 0
ppedido = 0
stock = 1
pventa = 0
precio_siva = 0
costo_siva = 0
total_iva=0
precio_tot = 0
pcosto = 0
        tasa_iva = 5  # default: 21% IVA rate
pitc = 0.00
ptasa = 0.00
unidad = 'u.'
prod_lista = None
if idProd:
p = prod_productos.objects.get(id=idProd)
if p:
coeficiente = p.tasa_iva.coeficiente
tasa_iva = p.tasa_iva.id
unidad = p.get_unidad_display()
if idubi:
try:
prod_ubi = prod_producto_ubicac.objects.get(producto=p,ubicacion__id=idubi)
                    except Exception:
prod_ubi = None
if prod_ubi:
stock = prod_ubi.get_stock_()
ppedido = prod_ubi.get_reposicion()
if idlista:
try:
prod_lista = prod_producto_lprecios.objects.get(producto=p,lista_precios__id=idlista)
                    except Exception:
prod_lista = None
if prod_lista:
pventa = prod_lista.precio_venta
pcosto = prod_lista.precio_cimp
pitc = prod_lista.precio_itc
ptasa = prod_lista.precio_tasa
precio_siva = pventa /(1+coeficiente)
precio_siva = Decimal(round(precio_siva,2))
if prod_lista:
costo_siva = prod_lista.precio_costo
total_iva = pventa - precio_siva
total_iva = Decimal(round(total_iva, 2))
precio_tot = pventa
prod={'precio_venta':pventa,'precio_costo':pcosto,'stock':stock,'ppedido':ppedido,'tasa_iva__id':tasa_iva,'tasa_iva__coeficiente':coeficiente
,'unidad':unidad,'precio_siva':precio_siva,'total_iva':total_iva,'precio_tot':precio_tot,'costo_siva':costo_siva,'pitc':pitc,'ptasa':ptasa}
    except Exception:
prod= {}
return HttpResponse( json.dumps(prod, cls=DjangoJSONEncoder), content_type='application/json' )
def buscarPrecioProd(prod,letra,cant,precio):
coeficiente = 0
stock = 1
    tasa_iva = 5  # default: 21% IVA rate
unidad = 'u.'
if prod:
coeficiente = prod.tasa_iva.coeficiente
tasa_iva = prod.tasa_iva.id
unidad = prod.get_unidad_display()
    precio_siva = precio / (1 + coeficiente)
    if letra == 'A':
        # 'A' invoices discriminate IVA: the price is taken net of IVA
        precio = precio_siva
        importe_subtotal = (precio * cant)
        importe_iva = round(importe_subtotal * coeficiente, 2)
        importe_total = round(importe_subtotal, 2) + importe_iva
    else:
        # Other letters carry IVA inside the price
        importe_subtotal = (precio * cant)
        importe_iva = round(importe_subtotal - (importe_subtotal / (1 + coeficiente)), 2)
        importe_total = round(importe_subtotal, 2)
        importe_subtotal = importe_total - importe_iva
    prod = {'precio': round(precio, 2), 'importe_iva': round(importe_iva, 2),
            'importe_subtotal': round(importe_subtotal, 2), 'importe_total': round(importe_total, 2)}
return prod
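# Worked example (hypothetical values): coeficiente=0.21, cant=2, precio=121.
# letra 'A' (IVA discriminated):
#     precio = 121 / 1.21 = 100.00; subtotal = 200.00
#     importe_iva = 200 * 0.21 = 42.00; total = 242.00
# any other letra (IVA already inside the price):
#     subtotal = 121 * 2 = 242.00; importe_iva = 242 - 242/1.21 = 42.00
#     total = 242.00, reported subtotal = 200.00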
def buscarPrecioListaProd(p,lista):
try:
coeficiente = 0
pventa = 0
precio_siva = 0
costo_siva = 0
total_iva=0
precio_tot = 0
pcosto = 0
        tasa_iva = 5  # default: 21% IVA rate
pitc = 0.00
ptasa = 0.00
unidad = 'u.'
coeficiente = p.tasa_iva.coeficiente
tasa_iva = p.tasa_iva.id
unidad = p.get_unidad_display()
try:
prod_lista = prod_producto_lprecios.objects.get(producto=p,lista_precios=lista)
        except Exception:
prod_lista = None
if prod_lista:
pventa = prod_lista.precio_venta
pcosto = prod_lista.precio_cimp
pitc = prod_lista.precio_itc
ptasa = prod_lista.precio_tasa
precio_siva = pventa /(1+coeficiente)
precio_siva = Decimal(round(precio_siva,2))
if prod_lista:
costo_siva = prod_lista.precio_costo
total_iva = pventa - precio_siva
total_iva = Decimal(round(total_iva, 2))
precio_tot = pventa
prod={'precio_venta':pventa,'precio_costo':pcosto,'tasa_iva__id':tasa_iva,'tasa_iva__coeficiente':coeficiente
,'unidad':unidad,'precio_siva':precio_siva,'total_iva':total_iva,'precio_tot':precio_tot,'costo_siva':costo_siva,'pitc':pitc,'ptasa':ptasa}
    except Exception:
prod = {}
return prod
@login_required
def buscarDatosEntidad(request):
lista= {}
try:
id = request.GET['id']
entidad = egr_entidad.objects.get(id=id)
dcto=entidad.dcto_general or 0
tope_cta_cte = entidad.tope_cta_cte
lista_precios = 1
if entidad.lista_precios_defecto:
lista_precios = entidad.lista_precios_defecto.id
if tope_cta_cte>0:
saldo = entidad.get_saldo_pendiente()
else:
saldo = 0
if not tope_cta_cte:
saldo_sobrepaso = 0
else:
saldo_sobrepaso = saldo - tope_cta_cte
lista = {'fact_categFiscal':entidad.fact_categFiscal,'dcto_general':dcto,'saldo_sobrepaso':saldo_sobrepaso,'lista_precios':lista_precios}
    except Exception:
lista= {}
return HttpResponse( json.dumps(lista, cls=DjangoJSONEncoder), content_type='application/json' )
@login_required
def setearLetraCPB(request):
try:
id = request.GET['id']
tipo = int(request.GET['tipo'])
entidad = egr_entidad.objects.get(id=id)
empr=empresa_actual(request)
        # For purchase types (tipo == 2) the parameters are swapped
if tipo==2:
letra = get_letra(empr.categ_fiscal,entidad.fact_categFiscal)
else:
letra = get_letra(entidad.fact_categFiscal,empr.categ_fiscal)
letra=list({letra})
    except Exception:
letra= []
return HttpResponse( json.dumps(letra, cls=DjangoJSONEncoder), content_type='application/json' )
@login_required
def setearCta_FP(request):
try:
fp = request.GET.get('fp', None)
cta = request.GET.get('cta',None)
datos= []
if fp and not cta:
tipo_fp = cpb_tipo_forma_pago.objects.get(id=fp)
cta = tipo_fp.cuenta.id
datos = [int(cta)]
elif cta and not fp:
            try:
                # A single lookup instead of three identical queries
                cuenta = cpb_cuenta.objects.get(id=cta)
                tipo_fp = cuenta.tipo_forma_pago
                banco = cuenta.banco
                cbu = cuenta.nro_cuenta_bancaria
                if tipo_fp:
                    fp = tipo_fp.id
                    datos.append(int(fp))
                if banco:
                    banco = banco.id
                    datos.append(int(banco))
                datos.append(cbu)
            except Exception:
                tipo_fp = None
    except Exception:
datos= []
return HttpResponse( json.dumps(datos, cls=DjangoJSONEncoder), content_type='application/json' )
@login_required
def ultimp_nro_cpb_ajax(request):
ttipo = request.GET.get('cpb_tipo',0)
letra = request.GET.get('letra','X')
pto_vta = request.GET.get('pto_vta',0)
entidad = request.GET.get('entidad',None)
if ttipo=='':
ttipo=0
if letra=='':
letra='X'
if pto_vta=='':
pto_vta=0
if entidad=='':
entidad=None
try:
tipo=cpb_tipo.objects.get(id=ttipo)
nro = 1
        if tipo.usa_pto_vta:
pv = cpb_pto_vta.objects.get(numero=int(pto_vta),empresa=empresa_actual(request))
ult_nro = cpb_pto_vta_numero.objects.get(cpb_tipo=tipo,letra=letra,cpb_pto_vta=pv,empresa=empresa_actual(request)).ultimo_nro
nro = ult_nro+1
else:
nro = 1
if entidad:
entidad = egr_entidad.objects.get(id=entidad)
ult_cpb = cpb_comprobante.objects.filter(entidad=entidad,cpb_tipo=tipo,letra=letra,pto_vta=int(pto_vta),empresa=empresa_actual(request)).order_by('numero').last()
if ult_cpb:
nro = ult_cpb.numero + 1
else:
tipo=cpb_tipo.objects.get(id=ttipo)
nro = tipo.ultimo_nro + 1
    except Exception:
nro = 1
nro=list({nro})
return HttpResponse( json.dumps(nro, cls=DjangoJSONEncoder), content_type='application/json' )
@login_required
def buscarDatosCPB(request):
try:
id = request.GET['id']
saldo=saldo_cpb(id)
        cpbs = [{'saldo': saldo}]
    except Exception:
cpbs= []
return HttpResponse( json.dumps(cpbs, cls=DjangoJSONEncoder), content_type='application/json' )
@login_required
def verifCobranza(request):
cpbs = request.GET.getlist('cpbs[]')
cant = 0
if cpbs:
entidades = list(cpb_comprobante.objects.filter(id__in=cpbs).values_list('entidad',flat=True))
cant=len(set(entidades))
return HttpResponse(json.dumps(cant), content_type = "application/json")
@login_required
def verifUnificacion(request):
cpbs = request.GET.getlist('cpbs[]')
cant = 0
data= {}
if cpbs:
comprobantes = cpb_comprobante.objects.filter(id__in=cpbs,cae=None,estado__id__lte=2,cpb_tipo__tipo__in=[1,2,3,9,21,22,23])
cant_cpbs = len(set(list(comprobantes.values_list('id',flat=True))))
cant_entidades = len(set(list(comprobantes.values_list('entidad',flat=True))))
cant_cpb_tipo = len(set(list(comprobantes.values_list('cpb_tipo',flat=True))))
data = {'cant_cpbs':int(cant_cpbs),'cant_entidades':int(cant_entidades),'cant_cpb_tipo':int(cant_cpb_tipo)}
return HttpResponse(json.dumps(data,cls=DjangoJSONEncoder), content_type = "application/json")
@login_required
def presup_aprobacion(request,id,estado):
cpb = cpb_comprobante.objects.get(pk=id)
cpb.presup_aprobacion=cpb_estado.objects.get(id=estado)
cpb.save()
messages.success(request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_presup_listado'))
@login_required
def presup_anular_reactivar(request,id,estado):
cpb = cpb_comprobante.objects.get(pk=id)
cpb.estado=cpb_estado.objects.get(id=estado)
if int(estado)==1:
cpb.presup_aprobacion=cpb_estado.objects.get(id=estado)
cpb.save()
messages.success(request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_presup_listado'))
@login_required
def cpb_anular_reactivar(request,id,estado,descr=None):
cpb = cpb_comprobante.objects.get(pk=id)
    # If it is a sale/purchase invoice with an associated payment/collection, block the change
if ((cpb.cpb_tipo.tipo not in [4,7])and cpb.tiene_cobranzas()):
messages.error(request, u'¡El Comprobante posee movimientos de cobro/pago asociados!.Verifique')
return HttpResponseRedirect(cpb.get_listado())
    # For movement/transfer comprobantes, void or reactivate their related CPBs (collected/deferred/deposited checks)
fps = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb,mdcp_salida__isnull=False).values_list('mdcp_salida',flat=True)
if (len(fps)>0):
messages.error(request, u'¡El Comprobante posee movimientos de cobranza/depósito de Cheques asociados!. Verifique')
return HttpResponseRedirect(cpb.get_listado())
state = cpb_estado.objects.get(id=estado)
cpb.estado=state
    if int(estado) == 3:
cpb.anulacion_fecha=hoy()
else:
cpb.anulacion_fecha=None
if descr:
cpb.anulacion_motivo = descr
cpb.save()
movs = cpb_comprobante_fp.objects.filter(pk__in=fps)
for m in movs:
m.cpb_comprobante.estado = state
        if int(estado) == 3:
m.cpb_comprobante.anulacion_fecha=hoy()
else:
m.cpb_comprobante.anulacion_fecha=None
if descr:
m.cpb_comprobante.anulacion_motivo = descr
m.cpb_comprobante.save()
    # Recalculate the saldo of each invoice tied to this receipt/payment order (voided receipts/OPs no longer count)
if (cpb.cpb_tipo.tipo in [4,7]):
cobranzas = cpb_cobranza.objects.filter(cpb_comprobante=cpb)
for c in cobranzas:
recalcular_saldo_cpb(c.cpb_factura.pk)
messages.success(request, u'¡Los datos se guardaron con éxito!')
return HttpResponseRedirect(cpb.get_listado())
@login_required
def cpb_facturar(request,id,nro):
try:
cpb = cpb_comprobante.objects.get(pk=id)
    except Exception:
cpb=None
#cpb.estado=cpb_estado.objects.get(id=4)
if cpb:
        if nro is None:
            # Simulated CAE: a random value of up to 14 digits
            nro = random.randrange(0, 99999999999999, 14)
nro = "{num:>014}".format(num=str(nro))
cpb.cae = nro
cpb.cae_vto = cpb.fecha_cpb+timedelta(days=30)
cpb.save()
messages.success(request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(cpb.get_listado())
return HttpResponseRedirect(reverse('cpb_venta_listado'))
from felectronica.facturacion import facturarAFIP,consultar_cae,facturarAFIP_simulac
@login_required
def cpb_facturar_afip(request):
respuesta = []
id = request.GET.get('id', None)
try:
cpb = cpb_comprobante.objects.get(pk=id)
    except Exception:
cpb=None
if cpb:
if cpb.cae is None:
respuesta = facturarAFIP(request,id)
estado = respuesta.get('resultado','')
cae = respuesta.get('cae','')
vto_cae = respuesta.get('fecha_vencimiento',None)
detalle = respuesta.get('detalle','')
nro_cpb = respuesta.get('cpb_nro','')
if (estado=='A')and(cae!=''):
#cpb.estado=cpb_estado.objects.get(id=4)
cpb.cae = cae
cpb.cae_vto = vto_cae
cpb.cae_errores = None
if detalle!='':
cpb.cae_observaciones = cpb.cae_observaciones+' '+detalle
cpb.numero = int(nro_cpb)
messages.success(request, u'¡Se facturó correctamente!')
else:
cpb.cae = None
cpb.cae_errores = respuesta.get('errores','')
cpb.cae_excepcion = respuesta.get('excepcion','')
cpb.cae_traceback = respuesta.get('traceback','')
cpb.cae_xml_request = respuesta.get('XmlRequest','')
cpb.cae_xml_response = respuesta.get('XmlResponse','')
cpb.save()
else:
            messages.error(request, u'El comprobante ya posee CAE!')
            respuesta = dict(errores=u'El comprobante ya posee CAE!')
return HttpResponse(json.dumps(respuesta,cls=DjangoJSONEncoder), content_type = "application/json")
@login_required
def respuesta(request):
respuesta = ['holaaa']
import time
time.sleep(5)
return HttpResponse(json.dumps(respuesta,cls=DjangoJSONEncoder), content_type = "application/json")
@login_required
def cpb_facturar_afip_id(request,id):
respuesta = []
try:
cpb = cpb_comprobante.objects.get(pk=id)
    except Exception:
cpb=None
if cpb:
#cpb.estado=cpb_estado.objects.get(id=4)
respuesta = facturarAFIP(request,id)
estado = respuesta.get('resultado','')
cae = respuesta.get('cae','')
vto_cae = respuesta.get('fecha_vencimiento',None)
detalle = respuesta.get('detalle','')
observaciones = respuesta.get('observaciones','')
errores = respuesta.get('errores','')
nro_cpb = respuesta.get('cpb_nro','')
if (estado=='A')and(cae!=''):
cpb.cae = cae
cpb.cae_vto = vto_cae
if detalle!='':
cpb.observacion = cpb.observacion+' '+detalle
cpb.numero = int(nro_cpb)
cpb.save()
messages.success(request, u'Los datos se guardaron con éxito!')
return HttpResponse(json.dumps(respuesta,cls=DjangoJSONEncoder), content_type = "application/json")
@login_required
def cpbs_anular(request):
limpiar_sesion(request)
id_cpbs = request.GET.getlist('id_cpb')
id_cpbs = cpb_comprobante.objects.filter(id__in=id_cpbs,cae=None).values_list('id',flat=True)
for c in id_cpbs:
cpb_anular_reactivar(request,c,3)
return HttpResponse(json.dumps(len(id_cpbs)), content_type = "application/json")
class EditarSeguimientoView(VariablesMixin,AjaxUpdateView):
form_class = SeguimientoForm
model = cpb_comprobante
pk_url_kwarg = 'id'
template_name = 'modal/general/form_seguimiento.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EditarSeguimientoView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(EditarSeguimientoView, self).form_valid(form)
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_initial(self):
initial = super(EditarSeguimientoView, self).get_initial()
return initial
# def unique_field_formset(field_name):
# from django.forms.models import BaseInlineFormSet
# class UniqueFieldFormSet (BaseInlineFormSet):
# def clean(self):
# if any(self.errors):
# # Don't bother validating the formset unless each form is valid on its own
# return
# values = set()
# for form in self.forms:
# value = form.cleaned_data[field_name]
# if value in values:
# raise forms.ValidationError('No deben repetirse productos!')
# values.add(value)
# return UniqueFieldFormSet
####################################################################################
from easy_pdf.rendering import render_to_pdf_response,render_to_pdf
from reportlab.lib import units
from reportlab.graphics import renderPM
from reportlab.graphics.barcode import createBarcodeDrawing
from reportlab.graphics.shapes import Drawing
from general.generarI25 import GenerarImagen
from general.base64 import encodestring,b64encode
import StringIO
def armarCodBar(cod):
barcode = GenerarImagen(codigo=cod)
output = StringIO.StringIO()
barcode.save(output,format="PNG")
data = encodestring(output.getvalue())
return format(data)
def armarQR(qr_data):
qr,url = GenerarQR(**qr_data)
output = StringIO.StringIO()
qr.save(output,format="PNG")
data = encodestring(output.getvalue())
return format(data),url
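# Both helpers return a base64-encoded PNG; a template can embed it inline as a
# data URI (a sketch; the exact template markup is assumed):
#     <img src="data:image/png;base64,{{ codbar }}">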
@login_required
def imprimirFactura_CB(request,id,pdf=None):
cpb = cpb_comprobante.objects.get(id=id)
#puedeVerPadron(request,c.id_unidad.pk)
if not cpb:
raise Http404
detalle_comprobante = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb)
detalle_totales_iva = cpb_comprobante_tot_iva.objects.filter(cpb_comprobante=cpb)
discrimina_iva = cpb.letra == 'A'
if cpb.condic_pago == 2:
cobranzas = cpb_comprobante_fp.objects.filter(cpb_comprobante__cpb_cobranza_cpb__cpb_factura=cpb,cpb_comprobante__estado__pk__lt=3)
cantidad = cobranzas.count()
else:
cobranzas = None
cantidad = 0
try:
cod_cpb = cpb_nro_afip.objects.get(cpb_tipo=cpb.cpb_tipo.tipo,letra=cpb.letra).numero_afip
codigo_letra = '{0:0{width}}'.format(cod_cpb,width=2)
    except Exception:
codigo_letra = '000'
if cpb.letra == 'X':
codigo_letra = '000'
tipo_cpb = 'REMITO X'
leyenda = u'DOCUMENTO NO VÁLIDO COMO FACTURA'
facturado = (cpb.cae!=None)
cantidad = detalle_comprobante.count() + cantidad
total_exng = cpb.importe_exento + cpb.importe_no_gravado + cpb.importe_perc_imp
if discrimina_iva:
total_bruto = cpb.importe_subtotal
else:
total_bruto = cpb.importe_total
renglones = 20 - cantidad
if renglones < 0:
renglones = 0
renglones = range(renglones)
context = Context()
fecha = hoy()
try:
total_imp1 = cpb.importe_tasa1
total_imp2 = cpb.importe_tasa2
total_imp = total_imp1 + total_imp2
    except Exception:
total_imp1=0
total_imp2=0
total_imp=0
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
config = None
sujeto_retencion = None
    if cpb.cpb_tipo.usa_pto_vta:
c = cpb.get_pto_vta()
if c.leyenda and discrimina_iva:
sujeto_retencion = u"OPERACIÓN SUJETA A RETENCIÓN"
else:
c = config
tipo_logo_factura = c.tipo_logo_factura
cuit = c.cuit
ruta_logo = c.ruta_logo
nombre_fantasia = c.nombre_fantasia
domicilio = c.domicilio
email = c.email
telefono = c.telefono
celular = c.celular
iibb = c.iibb
categ_fiscal = c.categ_fiscal
fecha_inicio_activ = c.fecha_inicio_activ
if facturado:
cod = ""
cod += str(cuit).rjust(11, "0") #CUIT
cod += str(cod_cpb).rjust(2, "0") #TIPO_CPB
cod += str(cpb.pto_vta).rjust(4, "0") #PTO_VTA
cod += str(cpb.cae).rjust(14, "0") #CAE
cod += str(cpb.cae_vto.strftime("%Y%m%d")).rjust(8, "0") #VTO_CAE
cod += str(digVerificador(cod))
codbar = armarCodBar(cod)
codigo = cod
template = 'general/facturas/factura.html'
if pdf:
return render_to_pdf(template,locals())
return render_to_pdf_response(request, template, locals())
@login_required
def imprimirFacturaQR(request,id,pdf=None):
cpb = cpb_comprobante.objects.get(id=id)
#puedeVerPadron(request,c.id_unidad.pk)
if not cpb:
raise Http404
detalle_comprobante = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb)
detalle_totales_iva = cpb_comprobante_tot_iva.objects.filter(cpb_comprobante=cpb)
discrimina_iva = cpb.letra == 'A'
if cpb.condic_pago == 2:
cobranzas = cpb_comprobante_fp.objects.filter(cpb_comprobante__cpb_cobranza_cpb__cpb_factura=cpb,cpb_comprobante__estado__pk__lt=3)
cantidad = cobranzas.count()
else:
cobranzas = None
cantidad = 0
try:
cod_cpb = cpb_nro_afip.objects.get(cpb_tipo=cpb.cpb_tipo.tipo,letra=cpb.letra).numero_afip
codigo_letra = '{0:0{width}}'.format(cod_cpb,width=2)
    except Exception:
codigo_letra = '000'
if cpb.letra == 'X':
codigo_letra = '000'
tipo_cpb = 'REMITO X'
leyenda = u'DOCUMENTO NO VÁLIDO COMO FACTURA'
facturado = (cpb.cae!=None)
cantidad = detalle_comprobante.count() + cantidad
total_exng = cpb.importe_exento + cpb.importe_no_gravado + cpb.importe_perc_imp
if discrimina_iva:
total_bruto = cpb.importe_subtotal
else:
total_bruto = cpb.importe_total
renglones = 20 - cantidad
if renglones < 0:
renglones = 0
renglones = range(renglones)
context = Context()
fecha = hoy()
try:
total_imp1 = cpb.importe_tasa1
total_imp2 = cpb.importe_tasa2
total_imp = total_imp1 + total_imp2
    except Exception:
total_imp1=0
total_imp2=0
total_imp=0
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
config = None
sujeto_retencion, leyenda_afip = None, None
    if cpb.cpb_tipo.usa_pto_vta:
c = cpb.get_pto_vta()
if c.leyenda and discrimina_iva:
sujeto_retencion = u"OPERACIÓN SUJETA A RETENCIÓN"
leyenda_afip = u"El crédito fiscal discriminado en el presente comprobante, sólo podrá ser computado a efectos del Régimen de Sostenimiento \
e Inclusión Fiscal para Pequeños Contribuyentes de la Ley Nº 27.618"
else:
c = config
tipo_logo_factura = c.tipo_logo_factura
cuit = c.cuit
ruta_logo = c.ruta_logo
nombre_fantasia = c.nombre_fantasia
domicilio = c.domicilio
email = c.email
telefono = c.telefono
celular = c.celular
iibb = c.iibb
categ_fiscal = c.categ_fiscal
fecha_inicio_activ = c.fecha_inicio_activ
if facturado:
#fecha="2020-10-13",cuit=30000000007, pto_vta=10, tipo_cmp=1, nro_cmp=94,
# importe=12100, moneda="PES", ctz=1.000,tipo_doc_rec=80, nro_doc_rec=20000000001,
# tipo_cod_aut="E", cod_aut=70417054367476
        moneda_id = 'PES'
        moneda_ctz = '1.000'
nro_doc,tipo_doc = cpb.entidad.get_nro_doc_afip()
datos_cmp = {
"fecha": cpb.fecha_cpb.strftime("%Y-%m-%d"),
"cuit": int(cuit),
"pto_vta": int(cpb.pto_vta),
"tipo_cmp": int(cpb.get_nro_afip()),
"nro_cmp": int(cpb.numero),
"importe": float(cpb.importe_total),
"moneda": moneda_id,
"ctz": float(moneda_ctz),
"tipo_doc_rec": int(tipo_doc),
"nro_doc_rec": int(nro_doc),
"cod_aut": int(cpb.cae),
}
qrcode,url = armarQR(datos_cmp)
template = 'general/facturas/facturaQR.html'
#template = 'general/facturas/factura.html'
if pdf:
return render_to_pdf(template,locals())
return render_to_pdf_response(request, template, locals())
@login_required
def imprimirFacturaHTML(request,id,pdf=None):
cpb = cpb_comprobante.objects.get(id=id)
#puedeVerPadron(request,c.id_unidad.pk)
if not cpb:
raise Http404
detalle_comprobante = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb)
detalle_totales_iva = cpb_comprobante_tot_iva.objects.filter(cpb_comprobante=cpb)
discrimina_iva = cpb.letra == 'A'
if cpb.condic_pago == 2:
cobranzas = cpb_comprobante_fp.objects.filter(cpb_comprobante__cpb_cobranza_cpb__cpb_factura=cpb,cpb_comprobante__estado__pk__lt=3)
cantidad = cobranzas.count()
else:
cobranzas = None
cantidad = 0
try:
cod_cpb = cpb_nro_afip.objects.get(cpb_tipo=cpb.cpb_tipo.tipo,letra=cpb.letra).numero_afip
codigo_letra = '{0:0{width}}'.format(cod_cpb,width=2)
    except Exception:
codigo_letra = '000'
if cpb.letra == 'X':
codigo_letra = '000'
tipo_cpb = 'REMITO X'
leyenda = u'DOCUMENTO NO VÁLIDO COMO FACTURA'
facturado = (cpb.cae!=None)
cantidad = detalle_comprobante.count() + cantidad
total_exng = cpb.importe_exento + cpb.importe_no_gravado
if discrimina_iva:
total_bruto = cpb.importe_subtotal
else:
total_bruto = cpb.importe_total
renglones = 20 - cantidad
if renglones < 0:
renglones = 0
renglones = range(renglones)
context = Context()
fecha = datetime.now()
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
config = None
    if cpb.cpb_tipo.usa_pto_vta:
c = cpb.get_pto_vta()
else:
c = config
tipo_logo_factura = c.tipo_logo_factura
cuit = c.cuit
ruta_logo = c.ruta_logo
nombre_fantasia = c.nombre_fantasia
domicilio = c.domicilio
email = c.email
telefono = c.telefono
celular = c.celular
iibb = c.iibb
categ_fiscal = c.categ_fiscal
fecha_inicio_activ = c.fecha_inicio_activ
if facturado:
cod = ""
cod += str(cuit).rjust(11, "0") #CUIT
cod += str(cod_cpb).rjust(2, "0") #TIPO_CPB
cod += str(cpb.pto_vta).rjust(4, "0") #PTO_VTA
cod += str(cpb.cae).rjust(14, "0") #CAE
cod += str(cpb.cae_vto.strftime("%Y%m%d")).rjust(8, "0") #VTO_CAE
cod += str(digVerificador(cod))
codbar = armarCodBar(cod)
codigo = cod
template = 'general/facturas/factura.html'
return render(request, template, locals())
@login_required
def imprimirPresupuesto(request,id,pdf=None):
cpb = cpb_comprobante.objects.get(id=id)
if not cpb:
raise Http404
#puedeVerPadron(request,c.id_unidad.pk)
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
config = None
c = config
tipo_logo_factura = c.tipo_logo_factura
cuit = c.cuit
ruta_logo = c.ruta_logo
nombre_fantasia = c.nombre_fantasia
domicilio = c.domicilio
email = c.email
telefono = c.telefono
celular = c.celular
iibb = c.iibb
categ_fiscal = c.categ_fiscal
fecha_inicio_activ = c.fecha_inicio_activ
detalle_comprobante = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb)
cantidad = detalle_comprobante.count()
renglones = 20 - cantidad
if renglones < 0:
renglones = 0
renglones = range(renglones)
context = Context()
fecha = datetime.now()
discrimina_iva = cpb.letra == 'A'
factura_X = cpb.letra == 'X'
if discrimina_iva:
subtotal = cpb.importe_subtotal
else:
subtotal = cpb.importe_total
codigo_letra = '000'
leyenda = u'DOCUMENTO NO VÁLIDO COMO FACTURA'
template = 'general/facturas/presupuesto.html'
if pdf:
return render_to_pdf(template,locals())
return render_to_pdf_response(request, template, locals())
@login_required
def imprimirRemito(request,id,pdf=None):
cpb = cpb_comprobante.objects.get(id=id)
if not cpb:
raise Http404
#puedeVerPadron(request,c.id_unidad.pk)
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
config = None
c = config
tipo_logo_factura = c.tipo_logo_factura
cuit = c.cuit
ruta_logo = c.ruta_logo
nombre_fantasia = c.nombre_fantasia
domicilio = c.domicilio
email = c.email
telefono = c.telefono
celular = c.celular
iibb = c.iibb
categ_fiscal = c.categ_fiscal
fecha_inicio_activ = c.fecha_inicio_activ
detalle_comprobante = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb)
cantidad = detalle_comprobante.count()
leyenda = u'DOCUMENTO NO VÁLIDO COMO FACTURA'
codigo_letra = '000'
renglones = 20 - cantidad
if renglones < 0:
renglones = 0
renglones = range(renglones)
context = Context()
fecha = hoy()
tipo = 'ORIGINAL'
template = 'general/facturas/remito.html'
if pdf:
return render_to_pdf(template,locals())
return render_to_pdf_response(request, template, locals())
@login_required
def imprimirCobranza(request,id,pdf=None):
cpb = cpb_comprobante.objects.get(id=id)
if not cpb:
raise Http404
#puedeVerPadron(request,c.id_unidad.pk)
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
config = None
c = config
tipo_logo_factura = c.tipo_logo_factura
cuit = c.cuit
ruta_logo = c.ruta_logo
nombre_fantasia = c.nombre_fantasia
domicilio = c.domicilio
email = c.email
telefono = c.telefono
celular = c.celular
iibb = c.iibb
categ_fiscal = c.categ_fiscal
fecha_inicio_activ = c.fecha_inicio_activ
cobranzas = cpb_cobranza.objects.filter(cpb_comprobante=cpb)
retenciones = cpb_comprobante_retenciones.objects.filter(cpb_comprobante=cpb)
leyenda = u'DOCUMENTO NO VÁLIDO COMO FACTURA'
pagos = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb)
codigo_letra = '000'
context = Context()
fecha = hoy()
template = 'general/facturas/cobranza.html'
if pdf:
return render_to_pdf(template,locals())
return render_to_pdf_response(request, template, locals())
@login_required
def imprimirCobranzaCtaCte(request,id,pdf=None):
cpb = cpb_comprobante.objects.get(id=id)
if not cpb:
raise Http404
#puedeVerPadron(request,c.id_unidad.pk)
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
raise Http404
c = config
tipo_logo_factura = c.tipo_logo_factura
cuit = c.cuit
ruta_logo = c.ruta_logo
nombre_fantasia = c.nombre_fantasia
domicilio = c.domicilio
email = c.email
telefono = c.telefono
celular = c.celular
iibb = c.iibb
categ_fiscal = c.categ_fiscal
fecha_inicio_activ = c.fecha_inicio_activ
leyenda = u'DOCUMENTO NO VÁLIDO COMO FACTURA'
pagos = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb)
codigo_letra = '000'
retenciones = cpb_comprobante_retenciones.objects.filter(cpb_comprobante=cpb)
context = Context()
fecha = hoy()
total_ctacte = cpb_comprobante.objects.filter(entidad=cpb.entidad,pto_vta__in=pto_vta_habilitados_list(request),cpb_tipo__usa_ctacte=True,cpb_tipo__compra_venta='V'\
,empresa=config,estado__in=[1,2],fecha_cpb__lte=cpb.fecha_cpb).aggregate(sum=Sum(F('importe_total')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
if total_ctacte<0:
total_ctacte=0
template = 'general/facturas/cobranza_ctacte.html'
if pdf:
return render_to_pdf(template,locals())
return render_to_pdf_response(request, template, locals())
@login_required
def imprimirPagoCtaCte(request,id,pdf=None):
cpb = cpb_comprobante.objects.get(id=id)
if not cpb:
raise Http404
#puedeVerPadron(request,c.id_unidad.pk)
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
raise Http404
c = config
tipo_logo_factura = c.tipo_logo_factura
cuit = c.cuit
ruta_logo = c.ruta_logo
nombre_fantasia = c.nombre_fantasia
domicilio = c.domicilio
email = c.email
telefono = c.telefono
celular = c.celular
iibb = c.iibb
categ_fiscal = c.categ_fiscal
fecha_inicio_activ = c.fecha_inicio_activ
cobranzas = cpb_cobranza.objects.filter(cpb_comprobante=cpb,cpb_comprobante__estado__pk__lt=3)
leyenda = u'DOCUMENTO NO VÁLIDO COMO FACTURA'
pagos = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb,cpb_comprobante__estado__pk__lt=3)
codigo_letra = '000'
context = Context()
fecha = datetime.now()
total_ctacte = cpb_comprobante.objects.filter(entidad=cpb.entidad,pto_vta__in=pto_vta_habilitados_list(request),cpb_tipo__usa_ctacte=True,cpb_tipo__compra_venta='C'\
,empresa=config,estado__in=[1,2],fecha_cpb__lte=cpb.fecha_cpb).aggregate(sum=Sum(F('importe_total')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
if total_ctacte<0:
total_ctacte=0
template = 'general/facturas/orden_pago_ctacte.html'
if pdf:
return render_to_pdf(template,locals())
return render_to_pdf_response(request, template, locals())
@login_required
def imprimirPago(request,id,pdf=None):
cpb = cpb_comprobante.objects.get(id=id)
if not cpb:
raise Http404
#puedeVerPadron(request,c.id_unidad.pk)
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
config = None
c = config
tipo_logo_factura = c.tipo_logo_factura
cuit = c.cuit
ruta_logo = c.ruta_logo
nombre_fantasia = c.nombre_fantasia
domicilio = c.domicilio
email = c.email
telefono = c.telefono
celular = c.celular
iibb = c.iibb
categ_fiscal = c.categ_fiscal
fecha_inicio_activ = c.fecha_inicio_activ
cobranzas = cpb_cobranza.objects.filter(cpb_comprobante=cpb,cpb_comprobante__estado__pk__lt=3)
leyenda = u'DOCUMENTO NO VÁLIDO COMO FACTURA'
pagos = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb,cpb_comprobante__estado__pk__lt=3)
codigo_letra = '000'
context = Context()
fecha = datetime.now()
template = 'general/facturas/orden_pago.html'
if pdf:
return render_to_pdf(template,locals())
return render_to_pdf_response(request, template, locals())
#************* EMAIL **************
from django.core.mail import send_mail, EmailMessage
from django.core.mail.backends.smtp import EmailBackend
def verifEmail(request):
id = request.POST.get('id',None)
email = None
try:
email = cpb_comprobante.objects.filter(id=id).first().entidad.get_correo()
if not email:
email=''
    except Exception:
email=''
return HttpResponse(json.dumps(email), content_type = "application/json")
@login_required
def mandarEmail(request,id):
try:
email = str(request.GET.get('email',''))
cpb = cpb_comprobante.objects.get(id=id)
mail_destino = []
if not email:
email=str(cpb.entidad.email)
direccion = email
if not direccion:
messages.error(request, 'El comprobante no pudo ser enviado! (verifique la dirección de correo del destinatario)')
return HttpResponseRedirect(cpb.get_listado())
mail_destino.append(direccion)
try:
config = empresa_actual(request)
except gral_empresa.DoesNotExist:
raise ValueError
datos = config.get_datos_mail()
mail_cuerpo = datos['mail_cuerpo']
mail_servidor = datos['mail_servidor']
mail_puerto = int(datos['mail_puerto'])
mail_usuario = datos['mail_usuario']
mail_password = str(datos['mail_password'])
mail_origen = datos['mail_origen']
if cpb.cpb_tipo.tipo == 4 or cpb.cpb_tipo.tipo == 7:
post_pdf = imprimirCobranza(request,id,True)
elif cpb.cpb_tipo.tipo == 5:
post_pdf = imprimirRemito(request,id,True)
elif cpb.cpb_tipo.tipo == 6:
post_pdf = imprimirPresupuesto(request,id,True)
else:
post_pdf = imprimirFacturaQR(request,id,True)
fecha = datetime.now()
nombre = "%s" % cpb
image_url = request.build_absolute_uri(reverse("chequear_email",kwargs={'id': cpb.id}))
html_content = get_template('general/varios/email.html').render({'mensaje': mail_cuerpo,'image_url':image_url})
        backend = EmailBackend(host=mail_servidor, port=mail_puerto, username=mail_usuario, password=mail_password, fail_silently=False)
email = EmailMessage( subject=u'%s' % (cpb.get_cpb_tipo),body=html_content,from_email=mail_origen,to=mail_destino,connection=backend)
email.attach(u'%s.pdf' %nombre,post_pdf, "application/pdf")
email.content_subtype = 'html'
email.send()
cpb.fecha_envio_mail=fecha
cpb.save()
        messages.success(request, 'El comprobante fue enviado con éxito!')
return HttpResponseRedirect(cpb.get_listado())
except Exception as e:
messages.error(request, 'El comprobante no pudo ser enviado! '+str(e))
return HttpResponseRedirect(cpb.get_listado())
#************* BANCOS **************
class BancosView(VariablesMixin,ListView):
model = cpb_banco
template_name = 'general/lista_bancos.html'
context_object_name = 'bancos'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(BancosView, self).dispatch(*args, **kwargs)
def get_queryset(self):
try:
queryset = cpb_banco.objects.filter(empresa__id__in=empresas_habilitadas(self.request))
except:
queryset = cpb_banco.objects.none()
return queryset
class BancosCreateView(VariablesMixin,AjaxCreateView):
form_class = BancosForm
template_name = 'modal/general/form_banco.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(BancosCreateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
form.instance.empresa = empresa_actual(self.request)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(BancosCreateView, self).form_valid(form)
def get_initial(self):
initial = super(BancosCreateView, self).get_initial()
return initial
class BancosEditView(VariablesMixin,AjaxUpdateView):
form_class = BancosForm
model = cpb_banco
pk_url_kwarg = 'id'
template_name = 'modal/general/form_banco.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(BancosEditView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(BancosEditView, self).form_valid(form)
def get_initial(self):
initial = super(BancosEditView, self).get_initial()
return initial
@login_required
def BancosDeleteView(request, id):
try:
objeto = get_object_or_404(cpb_banco, id=id)
if not tiene_permiso(request,'gral_configuracion'):
return redirect(reverse('principal'))
objeto.delete()
messages.success(request, u'¡Los datos se eliminaron con éxito!')
except:
messages.error(request, u'¡Los datos no pudieron eliminarse!')
return redirect('bancos_listado')
#************* MOVIMIENTOS INTERNOS **************
class MovInternosViewList(VariablesMixin,ListView):
model = cpb_comprobante
template_name = 'general/movimientos/movimientos_listado.html'
context_object_name = 'movimientos'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
limpiar_sesion(self.request)
if not tiene_permiso(self.request,'cpb_movimientos'):
return redirect(reverse('principal'))
return super(MovInternosViewList, self).dispatch(*args,**kwargs)
def get_context_data(self, **kwargs):
context = super(MovInternosViewList, self).get_context_data(**kwargs)
try:
config = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
config = None
form = ConsultaCpbsCompras(self.request.POST or None,empresa=config,request=self.request)
movimientos = cpb_comprobante_fp.objects.filter(cpb_comprobante__cpb_tipo__id=13,cpb_comprobante__empresa__id__in=empresas_habilitadas(self.request))\
.order_by('-cpb_comprobante__fecha_cpb','-cpb_comprobante__fecha_creacion')\
.select_related('cpb_comprobante','tipo_forma_pago','cta_egreso','cta_ingreso','mdcp_banco')
if form.is_valid():
fdesde = form.cleaned_data['fdesde']
fhasta = form.cleaned_data['fhasta']
if fdesde:
movimientos= movimientos.filter(cpb_comprobante__fecha_cpb__gte=fdesde)
if fhasta:
movimientos= movimientos.filter(cpb_comprobante__fecha_cpb__lte=fhasta)
else:
mvs= movimientos.filter(cpb_comprobante__fecha_cpb__gte=inicioMesAnt(),cpb_comprobante__fecha_cpb__lte=finMes())
if len(mvs)==0:
mvs = movimientos[:20]
movimientos=mvs
context['form'] = form
context['movimientos'] = movimientos
return context
def post(self, *args, **kwargs):
return self.get(*args, **kwargs)
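# Inline formset for the payment lines (cpb_comprobante_fp) of an internal
# movement; at least one line is required (min_num=1) and lines can be deleted.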
class CPBMIFPFormSet(BaseInlineFormSet):
pass
CPBFPFormSet = inlineformset_factory(cpb_comprobante, cpb_comprobante_fp,form=MovimCuentasFPForm,formset=CPBMIFPFormSet, can_delete=True,extra=0,min_num=1)
class MovInternosCreateView(VariablesMixin,CreateView):
form_class = MovimCuentasForm
template_name = 'general/movimientos/movimientos_form.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_mov_abm'):
return redirect(reverse('principal'))
return super(MovInternosCreateView, self).dispatch(*args, **kwargs)
def get_initial(self):
initial = super(MovInternosCreateView, self).get_initial()
initial['tipo_form'] = 'ALTA'
return initial
def get_form_kwargs(self,**kwargs):
kwargs = super(MovInternosCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBFPFormSet.form = staticmethod(curry(MovimCuentasFPForm,request=request))
cpb_fp = CPBFPFormSet(prefix='formFP')
return self.render_to_response(self.get_context_data(form=form,cpb_fp = cpb_fp))
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBFPFormSet.form = staticmethod(curry(MovimCuentasFPForm,request=request))
cpb_fp = CPBFPFormSet(self.request.POST,prefix='formFP')
if form.is_valid() and cpb_fp.is_valid():
return self.form_valid(form, cpb_fp)
else:
return self.form_invalid(form, cpb_fp)
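# An internal movement is saved as a voucher of cpb_tipo 13 with letter 'X' and
# point of sale 0, numbered through the ultimoNro() sequence helper, before its
# payment-line formset is attached.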
def form_valid(self, form, cpb_fp):
self.object = form.save(commit=False)
estado=cpb_estado.objects.get(pk=2)
self.object.estado=estado
self.object.letra='X'
self.object.pto_vta=0
self.object.numero = ultimoNro(13,self.object.pto_vta,self.object.letra)
tipo=cpb_tipo.objects.get(pk=13)
self.object.cpb_tipo=tipo
self.object.empresa = empresa_actual(self.request)
self.object.usuario = usuario_actual(self.request)
self.object.fecha_imputacion=self.object.fecha_cpb
self.object.save()
cpb_fp.instance = self.object
cpb_fp.cpb_comprobante = self.object.id
cpb_fp.save()
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('movimientos_listado'))
def form_invalid(self, form,cpb_fp):
return self.render_to_response(self.get_context_data(form=form,cpb_fp = cpb_fp))
class MovInternosEditView(VariablesMixin,UpdateView):
form_class = MovimCuentasForm
template_name = 'general/movimientos/movimientos_form.html'
pk_url_kwarg = 'id'
model = cpb_comprobante
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_mov_abm'):
return redirect(reverse('principal'))
return super(MovInternosEditView, self).dispatch(*args, **kwargs)
def get_initial(self):
initial = super(MovInternosEditView, self).get_initial()
initial['tipo_form'] = 'EDICION'
return initial
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
# form.fields['numero'].widget.attrs['disabled'] = True
CPBFPFormSet.form = staticmethod(curry(MovimCuentasFPForm,request=request))
cpb_fp = CPBFPFormSet(instance=self.object,prefix='formFP')
return self.render_to_response(self.get_context_data(form=form,cpb_fp = cpb_fp))
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBFPFormSet.form = staticmethod(curry(MovimCuentasFPForm,request=request))
cpb_fp = CPBFPFormSet(self.request.POST,instance=self.object,prefix='formFP')
if form.is_valid() and cpb_fp.is_valid():
return self.form_valid(form, cpb_fp)
else:
return self.form_invalid(form, cpb_fp)
def form_valid(self, form, cpb_fp):
self.object = form.save(commit=False)
self.object.fecha_imputacion=self.object.fecha_cpb
self.object.save()
cpb_fp.instance = self.object
cpb_fp.cpb_comprobante = self.object.id
cpb_fp.save()
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('movimientos_listado'))
def get_form_kwargs(self):
kwargs = super(MovInternosEditView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_invalid(self, form,cpb_fp):
return self.render_to_response(self.get_context_data(form=form,cpb_fp = cpb_fp))
@login_required
def MovInternosDeleteView(request, id):
cpb = get_object_or_404(cpb_comprobante, id=id)
if not tiene_permiso(request,'cpb_mov_abm'):
return redirect(reverse('principal'))
try:
#Transfer movement
if cpb.cpb_tipo.pk == 13:
#fetch the payment entries (fps) of the associated receipts
cpbs = cpb_comprobante_fp.objects.filter(mdcp_salida__id=cpb.id)
for c in cpbs:
c.mdcp_salida = None
c.save()
cpb.delete()
messages.success(request, u'Los datos se eliminaron con éxito!')
except:
messages.error(request, u'No se pudo eliminar el Comprobante!')
return redirect('movimientos_listado')
##########################################################################
class ComprobantesVerView(VariablesMixin,DetailView):
model = cpb_comprobante
pk_url_kwarg = 'id'
context_object_name = 'cpb'
template_name = 'general/facturas/detalle_factura.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ComprobantesVerView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ComprobantesVerView, self).get_context_data(**kwargs)
try:
config = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
config = None
cpb = self.object
context['config'] = config
detalle_comprobante = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb).select_related('producto','tasa_iva')
context['detalle_comprobante'] = detalle_comprobante
cobranzas = cpb_comprobante_fp.objects.filter(cpb_comprobante__cpb_cobranza_cpb__cpb_factura=cpb).select_related('tipo_forma_pago')
context['cobranzas'] = cobranzas
return context
class RecibosVerView(VariablesMixin,DetailView):
model = cpb_comprobante
pk_url_kwarg = 'id'
context_object_name = 'cpb'
template_name = 'general/facturas/detalle_recibo.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(RecibosVerView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(RecibosVerView, self).get_context_data(**kwargs)
try:
config = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
config = None
cpb = self.object
context['config'] = config
detalle = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb).select_related('tipo_forma_pago','mdcp_banco','cta_ingreso','cta_egreso')
context['detalle'] = detalle
cobranzas = cpb_cobranza.objects.filter(cpb_comprobante=cpb,cpb_comprobante__estado__pk__lt=3).select_related('cpb_comprobante')
context['cobranzas'] = cobranzas
return context
class OrdenPagoVerView(VariablesMixin,DetailView):
model = cpb_comprobante
pk_url_kwarg = 'id'
context_object_name = 'cpb'
template_name = 'general/facturas/detalle_op.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(OrdenPagoVerView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(OrdenPagoVerView, self).get_context_data(**kwargs)
try:
config = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
config = None
cpb = self.object
context['config'] = config
detalle = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb).select_related('tipo_forma_pago','mdcp_banco','cta_ingreso','cta_egreso')
context['detalle'] = detalle
cobranzas = cpb_cobranza.objects.filter(cpb_comprobante=cpb,cpb_comprobante__estado__pk__lt=3).select_related('cpb_comprobante')
context['cobranzas'] = cobranzas
return context
class NCNDVerView(VariablesMixin,DetailView):
model = cpb_comprobante
pk_url_kwarg = 'id'
context_object_name = 'cpb'
template_name = 'general/facturas/detalle_ncnd.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(NCNDVerView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(NCNDVerView, self).get_context_data(**kwargs)
try:
config = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
config = None
cpb = self.object
context['config'] = config
detalle_comprobante = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb).select_related('producto','tasa_iva')
context['detalle_comprobante'] = detalle_comprobante
return context
class RemitoVerView(VariablesMixin,DetailView):
model = cpb_comprobante
pk_url_kwarg = 'id'
context_object_name = 'cpb'
template_name = 'general/facturas/detalle_remito.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(RemitoVerView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(RemitoVerView, self).get_context_data(**kwargs)
try:
config = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
config = None
cpb = self.object
context['config'] = config
detalle_comprobante = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb).select_related('producto','tasa_iva')
context['detalle_comprobante'] = detalle_comprobante
return context
class PresupVerView(VariablesMixin,DetailView):
model = cpb_comprobante
pk_url_kwarg = 'id'
context_object_name = 'cpb'
template_name = 'general/facturas/detalle_presup.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(PresupVerView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(PresupVerView, self).get_context_data(**kwargs)
try:
config = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
config = None
cpb = self.object
context['config'] = config
detalle_comprobante = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb).select_related('producto','tasa_iva')
context['detalle_comprobante'] = detalle_comprobante
return context
class MovimVerView(VariablesMixin,DetailView):
model = cpb_comprobante
pk_url_kwarg = 'id'
context_object_name = 'cpb'
template_name = 'general/facturas/detalle_movimiento.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MovimVerView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(MovimVerView, self).get_context_data(**kwargs)
try:
config = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
config = None
cpb = self.object
context['config'] = config
detalle = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb).select_related('tipo_forma_pago','mdcp_banco','cta_ingreso','cta_egreso')
context['detalle'] = detalle
return context
#************* PercImp **************
class PercImpView(VariablesMixin,ListView):
model = cpb_perc_imp
template_name = 'general/lista_perc_imp.html'
context_object_name = 'perc_imp'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(PercImpView, self).dispatch(*args, **kwargs)
def get_queryset(self):
try:
queryset = cpb_perc_imp.objects.filter(empresa__id__in=empresas_habilitadas(self.request))
except:
queryset = cpb_perc_imp.objects.none()
return queryset
class PercImpCreateView(VariablesMixin,AjaxCreateView):
form_class = PercImpForm
template_name = 'modal/general/form_perc_imp.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(PercImpCreateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
form.instance.empresa = empresa_actual(self.request)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(PercImpCreateView, self).form_valid(form)
def get_initial(self):
initial = super(PercImpCreateView, self).get_initial()
return initial
class PercImpEditView(VariablesMixin,AjaxUpdateView):
form_class = PercImpForm
model = cpb_perc_imp
pk_url_kwarg = 'id'
template_name = 'modal/general/form_perc_imp.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(PercImpEditView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(PercImpEditView, self).form_valid(form)
def get_initial(self):
initial = super(PercImpEditView, self).get_initial()
return initial
@login_required
def PercImpDeleteView(request, id):
try:
objeto = get_object_or_404(cpb_perc_imp, id=id)
if not tiene_permiso(request,'gral_configuracion'):
return redirect(reverse('principal'))
objeto.delete()
messages.success(request, u'¡Los datos se eliminaron con éxito!')
except:
messages.error(request, u'¡Los datos no pudieron eliminarse!')
return redirect('percimp_listado')
#************* Retenciones ****
class RetencView(VariablesMixin,ListView):
model = cpb_retenciones
template_name = 'general/lista_retenc.html'
context_object_name = 'retenc'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(RetencView, self).dispatch(*args, **kwargs)
def get_queryset(self):
try:
queryset = cpb_retenciones.objects.filter(empresa__id__in=empresas_habilitadas(self.request))
except:
queryset = cpb_retenciones.objects.none()
return queryset
class RetencCreateView(VariablesMixin,AjaxCreateView):
form_class = RetencForm
template_name = 'modal/general/form_retenc.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(RetencCreateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
form.instance.empresa = empresa_actual(self.request)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(RetencCreateView, self).form_valid(form)
def get_initial(self):
initial = super(RetencCreateView, self).get_initial()
return initial
class RetencEditView(VariablesMixin,AjaxUpdateView):
form_class = RetencForm
model = cpb_retenciones
pk_url_kwarg = 'id'
template_name = 'modal/general/form_retenc.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(RetencEditView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(RetencEditView, self).form_valid(form)
def get_initial(self):
initial = super(RetencEditView, self).get_initial()
return initial
@login_required
def RetencDeleteView(request, id):
try:
objeto = get_object_or_404(cpb_retenciones, id=id)
if not tiene_permiso(request,'gral_configuracion'):
return redirect(reverse('principal'))
objeto.delete()
messages.success(request, u'¡Los datos se eliminaron con éxito!')
except:
messages.error(request, u'¡Los datos no pudieron eliminarse!')
return redirect('retenc_listado')
#************* FormaPago **************
class FPView(VariablesMixin,ListView):
model = cpb_tipo_forma_pago
template_name = 'general/lista_formapago.html'
context_object_name = 'formapago'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(FPView, self).dispatch(*args, **kwargs)
def get_queryset(self):
try:
queryset = cpb_tipo_forma_pago.objects.filter(empresa__id__in=empresas_habilitadas(self.request))
except:
queryset = cpb_tipo_forma_pago.objects.none()
return queryset
class FPCreateView(VariablesMixin,AjaxCreateView):
form_class = FormaPagoForm
template_name = 'modal/general/form_formapago.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(FPCreateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
form.instance.empresa = empresa_actual(self.request)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(FPCreateView, self).form_valid(form)
def get_initial(self):
initial = super(FPCreateView, self).get_initial()
return initial
def get_form_kwargs(self):
kwargs = super(FPCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
class FPEditView(VariablesMixin,AjaxUpdateView):
form_class = FormaPagoForm
model = cpb_tipo_forma_pago
pk_url_kwarg = 'id'
template_name = 'modal/general/form_formapago.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(FPEditView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(FPEditView, self).form_valid(form)
def get_initial(self):
initial = super(FPEditView, self).get_initial()
return initial
def get_form_kwargs(self):
kwargs = super(FPEditView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
@login_required
def FPDeleteView(request, id):
try:
objeto = get_object_or_404(cpb_tipo_forma_pago, id=id)
if not tiene_permiso(request,'gral_configuracion'):
return redirect(reverse('principal'))
objeto.delete()
messages.success(request, u'¡Los datos se eliminaron con éxito!')
except:
messages.error(request, u'¡Los datos no pudieron eliminarse!')
return redirect('formapago_listado')
#************* Pto de Venta y sus Nros **************
class PtoVtaView(VariablesMixin,ListView):
model = cpb_pto_vta
template_name = 'general/pto_vta/pto_vta_listado.html'
context_object_name = 'pto_vta'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(PtoVtaView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(PtoVtaView, self).get_context_data(**kwargs)
try:
context['numeros'] = cpb_pto_vta_numero.objects.filter(cpb_pto_vta__in=pto_vta_habilitados(self.request)).select_related('cpb_tipo','cpb_pto_vta').order_by('cpb_pto_vta__numero','cpb_tipo__nombre','letra')
except:
context['numeros'] = None
return context
def get_queryset(self):
try:
empresa = empresa_actual(self.request)
usuario = usuario_actual(self.request)
queryset = cpb_pto_vta.objects.all().order_by('numero')
if empresa:
queryset = queryset.filter(empresa__id__in=empresas_habilitadas(self.request))
try:
if usuario.cpb_pto_vta:
queryset = queryset.filter(id=usuario.cpb_pto_vta.id)
except:
return queryset
return queryset
except:
queryset = cpb_pto_vta.objects.none()
return queryset
class PtoVtaCreateView(VariablesMixin,CreateView):
form_class = PtoVtaForm
model = cpb_pto_vta
template_name = 'general/pto_vta/pto_vta_form.html'
success_url = '/comprobantes/pto_vta'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(PtoVtaCreateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
form.instance.empresa = empresa_actual(self.request)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(PtoVtaCreateView, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(PtoVtaCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_initial(self):
initial = super(PtoVtaCreateView, self).get_initial()
initial['request'] = self.request
return initial
class PtoVtaEditView(VariablesMixin,UpdateView):
form_class = PtoVtaEditForm
model = cpb_pto_vta
pk_url_kwarg = 'id'
template_name = 'general/pto_vta/pto_vta_form.html'
success_url = '/comprobantes/pto_vta'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(PtoVtaEditView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(PtoVtaEditView, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(PtoVtaEditView, self).get_form_kwargs()
# kwargs['request'] = self.request
return kwargs
def get_context_data(self, **kwargs):
context = super(PtoVtaEditView, self).get_context_data(**kwargs)
try:
context['nro'] = self.get_object()
except:
context['nro'] = None
return context
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_initial(self):
initial = super(PtoVtaEditView, self).get_initial()
# initial['request'] = self.request
return initial
@login_required
def pto_vta_baja_reactivar(request,id):
pto_vta = cpb_pto_vta.objects.get(pk=id)
pto_vta.baja = not pto_vta.baja
pto_vta.save()
messages.success(request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse("pto_vta_listado"))
@login_required
def pto_vta_numero_cambiar(request,id,nro):
pto_vta_numero = cpb_pto_vta_numero.objects.get(id=id,cpb_pto_vta__empresa=empresa_actual(request))
if pto_vta_numero:
pto_vta_numero.ultimo_nro = nro
pto_vta_numero.save()
messages.success(request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('pto_vta_listado'))
#************* Disponibilidades **************
class DispoView(VariablesMixin,ListView):
model = cpb_cuenta
template_name = 'general/lista_dispo.html'
context_object_name = 'cuenta'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(DispoView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(DispoView, self).get_context_data(**kwargs)
return context
def get_queryset(self):
try:
queryset = cpb_cuenta.objects.filter(empresa__id__in=empresas_habilitadas(self.request))
except:
queryset = cpb_cuenta.objects.none()
return queryset
class DispoCreateView(VariablesMixin,AjaxCreateView):
form_class = DispoForm
template_name = 'modal/general/form_dispo.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(DispoCreateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
form.instance.empresa = empresa_actual(self.request)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(DispoCreateView, self).form_valid(form)
def get_initial(self):
initial = super(DispoCreateView, self).get_initial()
return initial
def get_form_kwargs(self):
kwargs = super(DispoCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
class DispoEditView(VariablesMixin,AjaxUpdateView):
form_class = DispoForm
model = cpb_cuenta
pk_url_kwarg = 'id'
template_name = 'modal/general/form_dispo.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
if not self.get_object().modificable:
return redirect(reverse('disponibilidades_listado'))
return super(DispoEditView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(DispoEditView, self).form_valid(form)
def get_initial(self):
initial = super(DispoEditView, self).get_initial()
initial['request'] = self.request
return initial
def get_form_kwargs(self):
kwargs = super(DispoEditView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
@login_required
def DispoDeleteView(request, id):
try:
objeto = get_object_or_404(cpb_cuenta, id=id)
if not tiene_permiso(request,'gral_configuracion'):
return redirect(reverse('principal'))
objeto.delete()
messages.success(request, u'¡Los datos se eliminaron con éxito!')
except:
messages.error(request, u'¡Los datos no pudieron eliminarse!')
return redirect('disponibilidades_listado')
@login_required
def dispo_baja_reactivar(request,id):
cuenta = cpb_cuenta.objects.get(pk=id)
if not cuenta.modificable:
return redirect(reverse('disponibilidades_listado'))
cuenta.baja = not cuenta.baja
cuenta.save()
messages.success(request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse("disponibilidades_listado"))
@login_required
def SeleccionarChequesView(request):
if request.method == 'POST' and request.is_ajax():
cheque = request.POST.get('cheques', None)
response = []
if cheque:
cpb_fp = cpb_comprobante_fp.objects.filter(id=int(cheque))
response = list(cpb_fp.values('id','tipo_forma_pago__id','cta_ingreso__id','cta_egreso__id','mdcp_fecha','mdcp_banco__id','mdcp_cheque','importe','detalle'))
return HttpResponse(json.dumps(response,cls=DjangoJSONEncoder), content_type='application/json')
else:
id_cheques = request.GET.getlist('id_ch')
try:
id_cheques = [int(x) for x in request.GET.getlist('id_ch')]
except:
id_cheques = []
formCheques = FormCheques(request=request,id_cheques=id_cheques)
variables = RequestContext(request, {'formCheques':formCheques})
return render_to_response("general/varios/buscar_cheques.html", variables)
@login_required
def CobrarDepositarChequesView(request):
if request.method == 'POST' and request.is_ajax():
formCheques = FormChequesCobro(request.POST,request=request)
response = []
if formCheques.is_valid():
#create the internal movement and its related records
try:
id_cheques = [int(x) for x in request.POST.getlist('id_fp')]
cheques = cpb_comprobante_fp.objects.filter(id__in=id_cheques)
total_cheques = cheques.aggregate(sum=Sum('importe'))['sum'] or 0
except:
id_cheques = []
cheques = None
total_cheques = 0
estado=cpb_estado.objects.get(pk=2)
tipo=cpb_tipo.objects.get(pk=13)
letra='X'
pto_vta=0
numero = ultimoNro(13,pto_vta,letra)
cuenta = formCheques.cleaned_data['cuenta']
fecha_cpb = formCheques.cleaned_data['fecha_cpb']
detalle=u"Detalle Cobranza cheques"
for c in cheques:
detalle = detalle+' '+str(c.mdcp_cheque)
movimiento = cpb_comprobante(cpb_tipo=tipo,estado=estado,pto_vta=pto_vta,letra=letra,numero=numero,fecha_cpb=fecha_cpb,importe_total=total_cheques,
usuario=usuario_actual(request),empresa = empresa_actual(request),fecha_imputacion=fecha_cpb)
movimiento.save()
tipo_fp=cpb_tipo_forma_pago.objects.get(pk=1)
cta_egreso = cpb_cuenta.objects.get(pk=4)
recibo_fp = cpb_comprobante_fp(cpb_comprobante=movimiento,tipo_forma_pago=tipo_fp,cta_egreso=cta_egreso,cta_ingreso=cuenta,mdcp_fecha=datetime.now(),importe=total_cheques,detalle=detalle)
recibo_fp.save()
for c in cheques:
c.mdcp_salida = recibo_fp
c.save()
response.append({'msj':u'¡Se registró el movimiento con éxito!','estado':0})
else:
response.append({'msj':u'¡No se pudieron procesar las cobranzas!','estado':1})
return HttpResponse(json.dumps(response,cls=DjangoJSONEncoder), content_type='application/json')
else:
try:
id_cheques = [int(x) for x in request.GET.getlist('id_fp')]
cheques = cpb_comprobante_fp.objects.filter(id__in=id_cheques)
total_cheques = cheques.aggregate(sum=Sum('importe'))['sum'] or 0
except:
id_cheques = []
cheques = None
total_cheques = 0
formCheques = FormChequesCobro(request=request)
variables = RequestContext(request, {'formCheques':formCheques,'total_cheques':total_cheques})
return render_to_response("general/varios/cobrar_cheques.html", variables)
@login_required
def imprimir_detalles(request):
limpiar_sesion(request)
id_cpbs = [int(x) for x in request.GET.getlist('id_cpb')]
cpbs_detalles = cpb_comprobante_detalle.objects.filter(cpb_comprobante__id__in=id_cpbs,cpb_comprobante__empresa = empresa_actual(request)).order_by('cpb_comprobante__fecha_cpb','producto__nombre')
context = getVariablesMixin(request)
context['cpbs_detalles'] = cpbs_detalles
fecha = datetime.now()
context['fecha'] = fecha
template = 'reportes/varios/rep_detalle_cpbs.html'
return render_to_pdf_response(request, template, context)
###############################################################
from .forms import SaldoInicialForm
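# The opening balance of an account is wrapped in its own voucher (cpb_tipo 27,
# estado 3, letter 'X'), presumably so it flows through the cash reports like
# any other movement; deleting that voucher removes the balance again.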
class SaldoInicialCreateView(VariablesMixin,AjaxCreateView):
form_class = SaldoInicialForm
template_name = 'modal/general/form_saldo_inicial.html'
model = cpb_tipo_forma_pago
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(SaldoInicialCreateView, self).dispatch(*args, **kwargs)
def get_initial(self):
initial = super(SaldoInicialCreateView, self).get_initial()
return initial
def get_form_kwargs(self,**kwargs):
kwargs = super(SaldoInicialCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_valid(self, form):
self.object = form.save(commit=False)
estado=cpb_estado.objects.get(pk=3)
tipo=cpb_tipo.objects.get(pk=27)
cpb = cpb_comprobante(cpb_tipo=tipo,pto_vta=0,letra="X",numero=0,fecha_cpb=self.object.mdcp_fecha,importe_iva=0,fecha_imputacion=self.object.mdcp_fecha,
importe_total=self.object.importe,estado=estado,usuario=usuario_actual(self.request),fecha_vto=self.object.mdcp_fecha,empresa = empresa_actual(self.request))
cpb.save()
self.object.cpb_comprobante = cpb
self.object.save()
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(SaldoInicialCreateView, self).form_valid(form)
@login_required
def SaldoInicialDeleteView(request, id):
try:
objeto = get_object_or_404(cpb_comprobante, id=id)
objeto.delete()
messages.success(request, u'¡Los datos se eliminaron con éxito!')
except:
messages.error(request, u'¡Los datos no pudieron eliminarse!')
return redirect('caja_diaria')
```
#### File: IronWeb/egresos/views.py
```python
from django.template import RequestContext,Context
from django.shortcuts import render, redirect, get_object_or_404,render_to_response,HttpResponseRedirect,HttpResponse
from django.views.generic import TemplateView,ListView,CreateView,UpdateView,FormView,DetailView
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.db import connection
from datetime import datetime,date,timedelta
from django.utils import timezone
from dateutil.relativedelta import *
from .forms import *
from django.http import HttpResponseRedirect,HttpResponseForbidden,HttpResponse
from django.db.models import Q,Sum,Count
from comprobantes.models import *
import json
from decimal import *
from modal.views import AjaxCreateView,AjaxUpdateView,AjaxDeleteView
from django.contrib import messages
from general.utilidades import *
from general.views import VariablesMixin
from usuarios.views import tiene_permiso
from django.forms.models import inlineformset_factory,BaseInlineFormSet,formset_factory
from productos.models import prod_productos
from django.contrib.messages.views import SuccessMessageMixin
from django.core.serializers.json import DjangoJSONEncoder
from comprobantes.views import puedeEditarCPB,puedeEliminarCPB,ultimoNro,buscarDatosProd
from general.forms import ConsultaCpbs,ConsultaCpbsCompras,pto_vta_habilitados_list
from django.utils.functional import curry
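# Purchases listing: vouchers whose cpb_tipo.tipo is one of the purchase types
# (1, 2, 3, 9, 21, 22, 23) for the current company, filterable by entity, date
# range, point of sale, state and letter; without filters it falls back to the
# previous-month window, or to the latest 20 vouchers when that window is empty.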
class CPBCompraViewList(VariablesMixin,ListView):
model = cpb_comprobante
template_name = 'egresos/compras/cpb_compra_listado.html'
context_object_name = 'comprobantes'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
limpiar_sesion(self.request)
if not tiene_permiso(self.request,'cpb_compras'):
return redirect(reverse('principal'))
return super(CPBCompraViewList, self).dispatch(*args,**kwargs)
def get_context_data(self, **kwargs):
context = super(CPBCompraViewList, self).get_context_data(**kwargs)
try:
empresa = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
empresa = None
form = ConsultaCpbsCompras(self.request.POST or None)
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo__in=[1,2,3,9,21,22,23],cpb_tipo__compra_venta='C',empresa=empresa)\
.select_related('estado','cpb_tipo','entidad','vendedor')
if form.is_valid():
entidad = form.cleaned_data['entidad']
fdesde = form.cleaned_data['fdesde']
fhasta = form.cleaned_data['fhasta']
pto_vta = form.cleaned_data['pto_vta']
estado = form.cleaned_data['estado']
letra = form.cleaned_data['letra']
if int(estado) == 1:
comprobantes = comprobantes.filter(estado__in=[1,2,3])
elif int(estado) == 2:
comprobantes = comprobantes.filter(estado__in=[3])
else:
comprobantes = comprobantes.filter(estado__in=[1,2])
if fdesde:
comprobantes= comprobantes.filter(fecha_cpb__gte=fdesde)
if fhasta:
comprobantes= comprobantes.filter(fecha_cpb__lte=fhasta)
if entidad:
comprobantes= comprobantes.filter(entidad__apellido_y_nombre__icontains=entidad)
if pto_vta:
comprobantes= comprobantes.filter(pto_vta=pto_vta)
if letra:
comprobantes= comprobantes.filter(letra=letra)
else:
cpbs= cpb_comprobante.objects.filter(cpb_tipo__tipo__in=[1,2,3,9,21,22,23],fecha_cpb__gte=inicioMesAnt(),fecha_cpb__lte=finMes()\
,estado__in=[1,2],cpb_tipo__compra_venta='C',empresa=empresa).select_related('estado','cpb_tipo','entidad','vendedor')
if len(cpbs)==0:
cpbs = comprobantes.filter(estado__in=[1,2])[:20]
comprobantes=cpbs
context['form'] = form
context['comprobantes'] = comprobantes
return context
def post(self, *args, **kwargs):
return self.get(*args, **kwargs)
class CPBCompraDetalleFormSet(BaseInlineFormSet):
pass
class CPBCompraPIFormSet(BaseInlineFormSet):
pass
class CPBCompraFPFormSet(BaseInlineFormSet):
pass
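# Inline formsets for a purchase voucher: detail lines, perceptions/taxes and
# payment lines, each requiring at least one row (min_num=1).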
CPBDetalleFormSet = inlineformset_factory(cpb_comprobante, cpb_comprobante_detalle,form=CPBCompraDetalleForm,formset=CPBCompraDetalleFormSet, can_delete=True,extra=0,min_num=1)
CPBPIFormSet = inlineformset_factory(cpb_comprobante, cpb_comprobante_perc_imp,form=CPBCompraPercImpForm,formset=CPBCompraPIFormSet, can_delete=True,extra=0,min_num=1)
CPBFPFormSet = inlineformset_factory(cpb_comprobante, cpb_comprobante_fp,form=CPBFPForm,formset=CPBCompraFPFormSet, can_delete=True,extra=0,min_num=1)
class CPBCompraCreateView(VariablesMixin,CreateView):
form_class = CPBCompraForm
template_name = 'egresos/compras/cpb_compra_form.html'
model = cpb_comprobante
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_compras_abm'):
return redirect(reverse('principal'))
return super(CPBCompraCreateView, self).dispatch(*args, **kwargs)
def get_initial(self):
initial = super(CPBCompraCreateView, self).get_initial()
initial['tipo_form'] = 'ALTA'
initial['titulo'] = 'Nuevo Comprobante'
initial['request'] = self.request
return initial
def get_form_kwargs(self):
kwargs = super(CPBCompraCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBDetalleFormSet.form = staticmethod(curry(CPBCompraDetalleForm,request=request))
CPBPIFormSet.form = staticmethod(curry(CPBCompraPercImpForm,request=request))
CPBFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
compras_detalle = CPBDetalleFormSet(prefix='formDetalle')
compras_pi = CPBPIFormSet(prefix='formDetallePI')
cpb_fp = CPBFPFormSet(prefix='formFP')
return self.render_to_response(self.get_context_data(form=form,compras_detalle = compras_detalle,compras_pi=compras_pi,cpb_fp=cpb_fp))
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBDetalleFormSet.form = staticmethod(curry(CPBCompraDetalleForm,request=request))
CPBPIFormSet.form = staticmethod(curry(CPBCompraPercImpForm,request=request))
CPBFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
compras_detalle = CPBDetalleFormSet(self.request.POST,prefix='formDetalle')
compras_pi = CPBPIFormSet(self.request.POST,prefix='formDetallePI')
cpb_fp = CPBFPFormSet(self.request.POST,prefix='formFP')
condic_pago = int(self.request.POST.get('condic_pago'))
if form.is_valid() and compras_detalle.is_valid() and compras_pi.is_valid() and (cpb_fp.is_valid()or(condic_pago==1)):
return self.form_valid(form, compras_detalle,compras_pi,cpb_fp)
else:
return self.form_invalid(form, compras_detalle,compras_pi,cpb_fp)
def form_valid(self, form, compras_detalle,compras_pi,cpb_fp):
self.object = form.save(commit=False)
estado=cpb_estado.objects.get(pk=1)
self.object.estado=estado
self.object.empresa = empresa_actual(self.request)
self.object.usuario = usuario_actual(self.request)
if not self.object.fecha_vto:
self.object.fecha_vto=self.object.fecha_cpb
self.object.save()
compras_detalle.instance = self.object
compras_detalle.cpb_comprobante = self.object.id
compras_detalle.save()
if compras_pi:
compras_pi.instance = self.object
compras_pi.cpb_comprobante = self.object.id
compras_pi.save()
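# Cash purchase (condic_pago > 1): automatically create a payment order
# (cpb_tipo 12, letter 'X'), link it to this invoice through cpb_cobranza,
# attach the payment lines to the order, and move both vouchers to estado 2.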
if cpb_fp and (self.object.condic_pago>1):
estado=cpb_estado.objects.get(pk=2)
tipo_cpb=cpb_tipo.objects.get(pk=12)
nro = ultimoNro(12,self.object.pto_vta,"X",self.object.entidad)
op = cpb_comprobante(cpb_tipo=tipo_cpb,entidad=self.object.entidad,pto_vta=self.object.pto_vta,letra="X",
numero=nro,fecha_cpb=self.object.fecha_cpb,importe_iva=self.object.importe_iva,
importe_total=self.object.importe_total,estado=estado,usuario=self.object.usuario,
fecha_imputacion=self.object.fecha_cpb,empresa = self.object.empresa)
op.save()
cobranza = cpb_cobranza(cpb_comprobante=op,cpb_factura=self.object,importe_total=self.object.importe_total,desc_rec=0)
cobranza.save()
cpb_fp.instance = op
cpb_fp.cpb_comprobante = op.id
self.object.estado=estado
cpb_fp.save()
self.object.save()
recalcular_saldo_cpb(self.object.pk)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_compra_listado'))
def form_invalid(self, form,compras_detalle,compras_pi,cpb_fp):
return self.render_to_response(self.get_context_data(form=form,compras_detalle = compras_detalle,compras_pi=compras_pi,cpb_fp=cpb_fp))
class CPBCompraEditView(VariablesMixin,SuccessMessageMixin,UpdateView):
form_class = CPBCompraForm
template_name = 'egresos/compras/cpb_compra_form.html'
model = cpb_comprobante
pk_url_kwarg = 'id'
success_message = "CPB was created successfully"
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_compras_abm'):
return redirect(reverse('principal'))
if not puedeEditarCPB(self.get_object()):
messages.error(self.request, u'¡No puede editar un Comprobante con Pagos/Saldado!')
return redirect(reverse('cpb_compra_listado'))
return super(CPBCompraEditView, self).dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super(CPBCompraEditView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get_initial(self):
initial = super(CPBCompraEditView, self).get_initial()
initial['tipo_form'] = 'EDICION'
initial['titulo'] = 'Editar Comprobante '+str(self.get_object())
initial['request'] = self.request
return initial
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
#When edited, the voucher stays on current account
form.fields['condic_pago'].initial = 1
form.fields['condic_pago'].widget.attrs['disabled'] = True
form.fields['entidad'].widget.attrs['disabled'] = True
form.fields['cpb_tipo'].widget.attrs['disabled'] = True
importes=cobros_cpb(self.object.id)
form.fields['importe_cobrado'].initial = importes
form.fields['cliente_categ_fiscal'].initial = self.object.entidad.fact_categFiscal
form.fields['cliente_descuento'].initial = self.object.entidad.dcto_general
CPBDetalleFormSet.form = staticmethod(curry(CPBCompraDetalleForm,request=request))
CPBPIFormSet.form = staticmethod(curry(CPBCompraPercImpForm,request=request))
compras_detalle = CPBDetalleFormSet(instance=self.object,prefix='formDetalle')
compras_pi = CPBPIFormSet(instance=self.object,prefix='formDetallePI')
return self.render_to_response(self.get_context_data(form=form,compras_detalle = compras_detalle,compras_pi=compras_pi))
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBDetalleFormSet.form = staticmethod(curry(CPBCompraDetalleForm,request=request))
CPBPIFormSet.form = staticmethod(curry(CPBCompraPercImpForm,request=request))
compras_detalle = CPBDetalleFormSet(self.request.POST,instance=self.object,prefix='formDetalle')
compras_pi = CPBPIFormSet(self.request.POST,instance=self.object,prefix='formDetallePI')
if form.is_valid() and compras_detalle.is_valid() and compras_pi.is_valid():
return self.form_valid(form, compras_detalle,compras_pi)
else:
return self.form_invalid(form, compras_detalle,compras_pi)
def form_invalid(self, form,compras_detalle,compras_pi):
return self.render_to_response(self.get_context_data(form=form,compras_detalle = compras_detalle,compras_pi=compras_pi))
def form_valid(self, form, compras_detalle,compras_pi):
self.object = form.save(commit=False)
if not self.object.fecha_vto:
self.object.fecha_vto=self.object.fecha_cpb
self.object.save()
compras_detalle.instance = self.object
compras_detalle.cpb_comprobante = self.object.id
compras_detalle.save()
if compras_pi:
compras_pi.instance = self.object
compras_pi.cpb_comprobante = self.object.id
compras_pi.save()
recalcular_saldo_cpb(self.object.pk)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_compra_listado'))
class CPBCompraClonarCreateView(VariablesMixin,CreateView):
form_class = CPBCompraForm
template_name = 'egresos/compras/cpb_compra_form.html'
model = cpb_comprobante
pk_url_kwarg = 'id'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_compras_abm'):
return redirect(reverse('principal'))
return super(CPBCompraClonarCreateView, self).dispatch(*args, **kwargs)
def get_initial(self):
initial = super(CPBCompraClonarCreateView, self).get_initial()
initial['tipo_form'] = 'ALTA'
initial['titulo'] = 'Nuevo Comprobante Compras - Clonar CPB: %s' % self.get_object()
initial['request'] = self.request
return initial
def get_form_kwargs(self):
kwargs = super(CPBCompraClonarCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
cpb=self.get_object()
if cpb:
form.fields['id_cpb_padre'].initial = cpb.pk
form.fields['pto_vta'].initial = cpb.pto_vta
form.fields['cpb_tipo'].initial = cpb.cpb_tipo
form.fields['entidad'].initial = cpb.entidad
form.fields['importe_tasa1'].initial=cpb.importe_tasa1
form.fields['importe_tasa2'].initial=cpb.importe_tasa2
detalles = cpb_comprobante_detalle.objects.filter(cpb_comprobante=cpb)
det=[]
for c in detalles:
det.append({'producto': c.producto,'cantidad':c.cantidad,'detalle':c.detalle,'porc_dcto':c.porc_dcto,'tasa_iva':c.tasa_iva,
'coef_iva':c.coef_iva,'lista_precios':c.lista_precios,'importe_costo':c.importe_costo,'importe_unitario':c.importe_unitario,
'importe_subtotal':c.importe_subtotal,'importe_iva':c.importe_iva,'importe_total':c.importe_total,'origen_destino':c.origen_destino,'importe_tasa1':c.importe_tasa1,'importe_tasa2':c.importe_tasa2})
CPBDetalleFormSet = inlineformset_factory(cpb_comprobante, cpb_comprobante_detalle,form=CPBCompraDetalleForm,fk_name='cpb_comprobante',formset=CPBCompraDetalleFormSet, can_delete=True,extra=0,min_num=len(det))
else:
detalles = None
# ventas_detalle = CPBDetalleFormSet(prefix='formDetalle',initial=det)
# ventas_pi = CPBPIFormSet(prefix='formDetallePI')
# cpb_fp = CPBFPFormSet(prefix='formFP')
CPBDetalleFormSet.form = staticmethod(curry(CPBCompraDetalleForm,request=request,clonacion=True))
compras_detalle = CPBDetalleFormSet(prefix='formDetalle',initial=det)
CPBPIFormSet.form = staticmethod(curry(CPBCompraPercImpForm,request=request))
compras_pi = CPBPIFormSet(prefix='formDetallePI')
CPBFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
cpb_fp = CPBFPFormSet(prefix='formFP')
return self.render_to_response(self.get_context_data(form=form,compras_detalle = compras_detalle,compras_pi=compras_pi,cpb_fp=cpb_fp))
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBDetalleFormSet.form = staticmethod(curry(CPBCompraDetalleForm,request=request))
CPBPIFormSet.form = staticmethod(curry(CPBCompraPercImpForm,request=request))
CPBFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
compras_detalle = CPBDetalleFormSet(self.request.POST,prefix='formDetalle')
compras_pi = CPBPIFormSet(self.request.POST,prefix='formDetallePI')
cpb_fp = CPBFPFormSet(self.request.POST,prefix='formFP')
condic_pago = int(self.request.POST.get('condic_pago'))
if form.is_valid() and compras_detalle.is_valid() and compras_pi.is_valid() and (cpb_fp.is_valid()or(condic_pago==1)):
return self.form_valid(form, compras_detalle,compras_pi,cpb_fp)
else:
return self.form_invalid(form, compras_detalle,compras_pi,cpb_fp)
def form_valid(self, form, compras_detalle,compras_pi,cpb_fp):
self.object = form.save(commit=False)
estado=cpb_estado.objects.get(pk=1)
self.object.estado=estado
self.object.empresa = empresa_actual(self.request)
self.object.usuario = usuario_actual(self.request)
if not self.object.fecha_vto:
self.object.fecha_vto=self.object.fecha_cpb
self.object.save()
compras_detalle.instance = self.object
compras_detalle.cpb_comprobante = self.object.id
compras_detalle.save()
if compras_pi:
compras_pi.instance = self.object
compras_pi.cpb_comprobante = self.object.id
compras_pi.save()
if cpb_fp and (self.object.condic_pago>1):
estado=cpb_estado.objects.get(pk=2)
tipo_cpb=cpb_tipo.objects.get(pk=12)
nro = ultimoNro(12,self.object.pto_vta,"X",self.object.entidad)
op = cpb_comprobante(cpb_tipo=tipo_cpb,entidad=self.object.entidad,pto_vta=self.object.pto_vta,letra="X",
numero=nro,fecha_cpb=self.object.fecha_cpb,importe_iva=self.object.importe_iva,
importe_total=self.object.importe_total,estado=estado,empresa = empresa_actual(self.request))
op.save()
cobranza = cpb_cobranza(cpb_comprobante=op,cpb_factura=self.object,importe_total=self.object.importe_total,desc_rec=0)
cobranza.save()
cpb_fp.instance = op
cpb_fp.cpb_comprobante = op.id
self.object.estado=estado
cpb_fp.save()
self.object.save()
recalcular_saldo_cpb(self.object.pk)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_compra_listado'))
def form_invalid(self, form,compras_detalle,compras_pi,cpb_fp):
return self.render_to_response(self.get_context_data(form=form,compras_detalle = compras_detalle,compras_pi=compras_pi,cpb_fp=cpb_fp))
@login_required
def CPBCompraDeleteView(request, id):
try:
cpb = get_object_or_404(cpb_comprobante, id=id)
if not tiene_permiso(request,'cpb_compras_abm'):
return redirect(reverse('principal'))
if not puedeEliminarCPB(cpb):
messages.error(request, u'¡No puede eliminar un Comprobante con Pagos/Saldado!')
return redirect(reverse('cpb_compra_listado'))
cpb.delete()
messages.success(request, u'Los datos se eliminaron con éxito!')
except:
messages.error(request, u'¡El Comprobante no existe/no pudo eliminarse!')
return redirect('cpb_compra_listado')
#*********************************************************************************
class CPBPagosViewList(VariablesMixin,ListView):
model = cpb_comprobante
template_name = 'egresos/ordenpago/cpb_rec_pago_listado.html'
context_object_name = 'comprobantes'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
limpiar_sesion(self.request)
if not tiene_permiso(self.request,'cpb_pagos'):
return redirect(reverse('principal'))
return super(CPBPagosViewList, self).dispatch(*args,**kwargs)
def get_context_data(self, **kwargs):
context = super(CPBPagosViewList, self).get_context_data(**kwargs)
try:
empresa = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
empresa = None
form = ConsultaCpbsCompras(self.request.POST or None,empresa=empresa,request=self.request)
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo=7,empresa=empresa,estado__in=[1,2]).order_by('-fecha_cpb','-id')\
.annotate(cobranzas=Sum('cpb_cobranza_cpb__importe_total')).select_related('estado','cpb_tipo','entidad','vendedor')
if form.is_valid():
entidad = form.cleaned_data['entidad']
fdesde = form.cleaned_data['fdesde']
fhasta = form.cleaned_data['fhasta']
pto_vta = form.cleaned_data['pto_vta']
estado = form.cleaned_data['estado']
if int(estado) == 1:
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo=7,empresa=empresa,estado__in=[1,2,3]).annotate(cobranzas=Sum('cpb_cobranza_cpb__importe_total'))
elif int(estado) == 2:
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo=7,empresa=empresa,estado__in=[3]).annotate(cobranzas=Sum('cpb_cobranza_cpb__importe_total'))
if fdesde:
comprobantes= comprobantes.filter(Q(fecha_cpb__gte=fdesde))
if fhasta:
comprobantes= comprobantes.filter(Q(fecha_cpb__lte=fhasta))
if entidad:
comprobantes= comprobantes.filter(entidad__apellido_y_nombre__icontains=entidad)
if pto_vta:
comprobantes= comprobantes.filter(Q(pto_vta=pto_vta))
comprobantes = comprobantes.select_related('estado','cpb_tipo','entidad','vendedor')
else:
cpbs= comprobantes.filter(fecha_cpb__gte=inicioMesAnt(),fecha_cpb__lte=finMes()).select_related('estado','cpb_tipo','entidad','vendedor')
if len(cpbs)==0:
cpbs = comprobantes[:20]
comprobantes=cpbs
context['form'] = form
context['comprobantes'] = comprobantes
return context
def post(self, *args, **kwargs):
return self.get(*args, **kwargs)
class CPBPagosRetFormSet(BaseInlineFormSet):
pass
class CPBPagosFPFormSet(BaseInlineFormSet):
pass
class CPBPagosCPBFormSet(BaseInlineFormSet):
pass
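# Formsets used by the payment-order views: payment lines, the invoices being
# paid (cpb_cobranza, keyed by cpb_comprobante) and any tax retentions applied.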
PagosFPFormSet = inlineformset_factory(cpb_comprobante, cpb_comprobante_fp,form=CPBFPForm,formset=CPBPagosFPFormSet, can_delete=True,extra=0,min_num=1)
PagosCPBFormSet = inlineformset_factory(cpb_comprobante, cpb_cobranza, fk_name='cpb_comprobante',form=CPBPagoCPBForm,formset=CPBPagosCPBFormSet, can_delete=True,extra=0,min_num=1)
PagosRetFormSet = inlineformset_factory(cpb_comprobante, cpb_comprobante_retenciones,form=CPBPagoRetForm,formset=CPBPagosRetFormSet, can_delete=True,extra=0,min_num=1)
class CPBPagoCreateView(VariablesMixin,CreateView):
form_class = CPBPagoForm
template_name = 'egresos/ordenpago/cpb_rec_pago_form.html'
model = cpb_comprobante
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_pagos_abm'):
return redirect(reverse('principal'))
return super(CPBPagoCreateView, self).dispatch(*args, **kwargs)
def get_initial(self):
initial = super(CPBPagoCreateView, self).get_initial()
initial['tipo_form'] = 'ALTA'
initial['request'] = self.request
return initial
def get_form_kwargs(self,**kwargs):
kwargs = super(CPBPagoCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
PagosFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
PagosRetFormSet.form = staticmethod(curry(CPBPagoRetForm,request=request))
cpb_fp = PagosFPFormSet(prefix='formFP')
cpb_ret = PagosRetFormSet(prefix='formRet')
return self.render_to_response(self.get_context_data(form=form,cpb_fp = cpb_fp,cpb_ret=cpb_ret))
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
PagosFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
PagosRetFormSet.form = staticmethod(curry(CPBPagoRetForm,request=request))
cpb_fp = PagosFPFormSet(self.request.POST,prefix='formFP')
cpb_ret = PagosRetFormSet(self.request.POST,prefix='formRet')
if form.is_valid() and cpb_fp.is_valid() and cpb_ret.is_valid():
return self.form_valid(form, cpb_fp,cpb_ret)
else:
return self.form_invalid(form, cpb_fp,cpb_ret)
def form_valid(self, form, cpb_fp, cpb_ret):
self.object = form.save(commit=False)
estado=cpb_estado.objects.get(pk=2)
self.object.estado=estado
self.object.letra='X'
self.object.numero = ultimoNro(12,self.object.pto_vta,self.object.letra,self.object.entidad)
tipo=cpb_tipo.objects.get(pk=12)
self.object.cpb_tipo=tipo
self.object.empresa = empresa_actual(self.request)
self.object.usuario = usuario_actual(self.request)
self.object.fecha_imputacion=self.object.fecha_cpb
if not self.object.fecha_vto:
self.object.fecha_vto=self.object.fecha_cpb
self.object.save()
cpb_fp.instance = self.object
cpb_fp.cpb_comprobante = self.object.id
cpb_fp.save()
if cpb_ret:
cpb_ret.instance = self.object
cpb_ret.cpb_comprobante = self.object.id
cpb_ret.save()
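# For payment lines funded with an existing cheque ('origen'), mark the source
# cheque entry as spent by pointing its mdcp_salida at the newly saved line.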
for f in cpb_fp:
datos = f.cleaned_data
id= datos.get('origen')
if id:
cheque= cpb_comprobante_fp.objects.get(id=id)
cpb=cpb_comprobante_fp.objects.get(id=f.instance.pk)
cheque.mdcp_salida = cpb
cheque.save()
recalcular_saldo_cpb(self.object.pk)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_pago_listado'))
def form_invalid(self, form,cpb_fp,cpb_ret):
return self.render_to_response(self.get_context_data(form=form,cpb_fp = cpb_fp, cpb_ret=cpb_ret))
class CPBPagoEditView(VariablesMixin,UpdateView):
form_class = CPBPagoForm
template_name = 'egresos/ordenpago/cpb_rec_pago_form.html'
model = cpb_comprobante
pk_url_kwarg = 'id'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_pagos_abm'):
return redirect(reverse('principal'))
if not puedeEditarCPB(self.get_object()):
messages.error(self.request, u'¡No puede editar un Comprobante asociado!')
return redirect(reverse('cpb_pago_listado'))
return super(CPBPagoEditView, self).dispatch(*args, **kwargs)
def get_initial(self):
initial = super(CPBPagoEditView, self).get_initial()
initial['tipo_form'] = 'EDICION'
initial['request'] = self.request
return initial
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
form.fields['entidad'].widget.attrs['disabled'] = True
form.fields['pto_vta'].widget.attrs['disabled'] = True
PagosFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
PagosRetFormSet.form = staticmethod(curry(CPBPagoRetForm,request=request))
cpb_fp = PagosFPFormSet(instance=self.object,prefix='formFP')
cpb_ret = PagosRetFormSet(instance=self.object,prefix='formRet')
cpbs_pagos=cpb_cobranza.objects.filter(cpb_comprobante=self.object.id,cpb_comprobante__estado__pk__lt=3)
PagosCPBFormSet = inlineformset_factory(cpb_comprobante, cpb_cobranza, fk_name='cpb_comprobante',form=CPBPagoCPBForm,formset=CPBPagosCPBFormSet,extra=len(cpbs_pagos), can_delete=False,max_num=len(cpbs_pagos))
d=[]
for cpb in cpbs_pagos:
c = cpb.cpb_factura
entidad = c.entidad
d.append({'detalle_cpb': c.get_cpb_tipo,'desc_rec':'0','importe_total':cpb.importe_total,'saldo':c.saldo,'id_cpb_factura':c.id,'cpb_factura':c})
cpbs = PagosCPBFormSet(prefix='formCPB',initial=d)
return self.render_to_response(self.get_context_data(form=form,cpb_fp=cpb_fp,cpbs=cpbs,cpb_ret=cpb_ret))
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
PagosFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
PagosRetFormSet.form = staticmethod(curry(CPBPagoRetForm,request=request))
cpb_fp = PagosFPFormSet(self.request.POST,instance=self.object,prefix='formFP')
cpb_ret = PagosRetFormSet(self.request.POST,instance=self.object,prefix='formRet')
cpbs = PagosCPBFormSet(self.request.POST,instance=self.object,prefix='formCPB')
if form.is_valid() and cpb_fp.is_valid() and cpb_ret.is_valid() and cpbs.is_valid():
return self.form_valid(form, cpb_fp,cpbs,cpb_ret)
else:
return self.form_invalid(form, cpb_fp,cpbs,cpb_ret)
def form_valid(self, form, cpb_fp,cpbs,cpb_ret):
self.object = form.save(commit=False)
self.object.fecha_imputacion=self.object.fecha_cpb
if not self.object.fecha_vto:
self.object.fecha_vto=self.object.fecha_cpb
self.object.save()
cpb_fp.instance = self.object
cpb_fp.cpb_comprobante = self.object.id
cpb_fp.save()
if cpb_ret:
cpb_ret.instance = self.object
cpb_ret.cpb_comprobante = self.object.id
cpb_ret.save()
for fp in cpb_fp:
if fp.cleaned_data.get("origen"):
origen = fp.cleaned_data.get("origen")
c = cpb_comprobante_fp.objects.get(id=origen)
c.mdcp_salida = fp.instance
c.save()
cpbs=cpb_cobranza.objects.filter(cpb_comprobante=self.object.id)
for c in cpbs:
recalcular_saldo_cpb(c.cpb_factura.pk)
recalcular_saldo_cpb(self.object.pk)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_pago_listado'))
def get_form_kwargs(self):
kwargs = super(CPBPagoEditView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_invalid(self, form,cpb_fp,cpbs,cpb_ret):
return self.render_to_response(self.get_context_data(form=form,cpb_fp = cpb_fp,cpbs=cpbs,cpb_ret=cpb_ret))
@login_required
def CPBPagoDeleteView(request, id):
try:
cpb = get_object_or_404(cpb_comprobante, id=id)
if not tiene_permiso(request,'cpb_pagos_abm'):
return redirect(reverse('principal'))
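        # Deletion guard: if any payment row of this voucher was already consumed
        # by a later cheque collection/deposit (mdcp_salida set), the voucher
        # cannot be removed without breaking that chain.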
fps = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb,mdcp_salida__isnull=False).values_list('mdcp_salida',flat=True)
if (len(fps)>0):
            messages.error(request, u'¡El Comprobante posee movimientos de cobranza/depósito de Cheques asociados! Verifique')
return HttpResponseRedirect(cpb.get_listado())
# if (cpb.tiene_cobranzasREC_OP()):
# messages.error(request, u'¡El Comprobante posee movimientos de cobro/pago asociados!.Verifique')
# return HttpResponseRedirect(cpb.get_listado())
else:
            # fetch the payment-method (fp) rows of the associated receipts
pagos = cpb_comprobante_fp.objects.filter(cpb_comprobante=cpb).values_list('id',flat=True)
id_pagos = [int(x) for x in pagos]
cpbs = cpb_comprobante_fp.objects.filter(mdcp_salida__in=id_pagos)
for c in cpbs:
c.mdcp_salida = None
c.save()
cpb.delete()
            messages.success(request, u'El Comprobante se eliminó con éxito!')
except:
messages.error(request, u'No se pudo eliminar el Comprobante!')
return redirect('cpb_pago_listado')
###############################################################
class CPBPagarCreateView(VariablesMixin,CreateView):
form_class = CPBPagoForm
template_name = 'egresos/ordenpago/cpb_rec_pago_form.html'
model = cpb_comprobante
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_compras_pagar'):
return redirect(reverse('principal'))
return super(CPBPagarCreateView, self).dispatch(*args, **kwargs)
def get_initial(self):
initial = super(CPBPagarCreateView, self).get_initial()
initial['tipo_form'] = 'ALTA'
initial['request'] = self.request
return initial
def get_form_kwargs(self,**kwargs):
kwargs = super(CPBPagarCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
form.fields['entidad'].widget.attrs['disabled'] = True
cpbs_pagos = request.session.get('cpbs_pagos', None)
entidad = None
total = Decimal(0.00)
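        # 'cpbs_pagos' holds the invoices picked in CPBPagosSeleccionarView as a
        # JSON list; each entry seeds one fixed row of the invoices formset and
        # their importes add up to the suggested payment amount.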
if cpbs_pagos:
cpbs_pagos = json.loads(cpbs_pagos)
PagosCPBFormSet = inlineformset_factory(cpb_comprobante, cpb_cobranza, fk_name='cpb_comprobante',form=CPBPagoCPBForm,formset=CPBPagosCPBFormSet,extra=len(cpbs_pagos), can_delete=False,max_num=len(cpbs_pagos))
d=[]
for cpb in cpbs_pagos:
c = cpb_comprobante.objects.get(id=cpb['id_cpb_factura'])
entidad = c.entidad
d.append({'detalle_cpb': c.get_cpb_tipo,'desc_rec':'0','importe_total':cpb['importe_total'],'saldo':c.saldo,'id_cpb_factura':c.id,'cpb_factura':c})
total += Decimal(cpb['importe_total'])
cpbs = PagosCPBFormSet(prefix='formCPB',initial=d)
if entidad:
form.fields['entidad'].initial = entidad
else:
PagosCPBFormSet = inlineformset_factory(cpb_comprobante, cpb_cobranza, fk_name='cpb_comprobante',form=CPBPagoCPBForm,formset=CPBPagosCPBFormSet, can_delete=True,extra=0,min_num=1)
cpbs = PagosCPBFormSet(prefix='formCPB')
PagosFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
PagosRetFormSet.form = staticmethod(curry(CPBPagoRetForm,request=request))
cpb_fp = PagosFPFormSet(prefix='formFP',initial=[{'importe':total}])
cpb_ret = PagosRetFormSet(prefix='formRet')
return self.render_to_response(self.get_context_data(form=form,cpb_fp=cpb_fp,cpbs=cpbs,cpb_ret=cpb_ret))
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
PagosFPFormSet.form = staticmethod(curry(CPBFPForm,request=request))
PagosRetFormSet.form = staticmethod(curry(CPBPagoRetForm,request=request))
cpb_fp = PagosFPFormSet(self.request.POST,prefix='formFP')
cpbs = PagosCPBFormSet(self.request.POST,prefix='formCPB')
cpb_ret = PagosRetFormSet(self.request.POST,prefix='formRet')
        if form.is_valid() and cpb_fp.is_valid() and cpbs.is_valid() and cpb_ret.is_valid():
return self.form_valid(form, cpb_fp,cpbs,cpb_ret)
else:
return self.form_invalid(form, cpb_fp,cpbs, cpb_ret)
def form_valid(self, form, cpb_fp,cpbs,cpb_ret):
self.object = form.save(commit=False)
estado=cpb_estado.objects.get(pk=2)
self.object.estado=estado
self.object.letra='X'
self.object.numero = ultimoNro(12,self.object.pto_vta,self.object.letra,self.object.entidad)
tipo=cpb_tipo.objects.get(pk=12)
self.object.cpb_tipo=tipo
self.object.empresa = empresa_actual(self.request)
self.object.usuario = usuario_actual(self.request)
self.object.fecha_imputacion=self.object.fecha_cpb
if not self.object.fecha_vto:
self.object.fecha_vto=self.object.fecha_cpb
self.object.save()
cpb_fp.instance = self.object
cpb_fp.cpb_comprobante = self.object.id
cpb_fp.save()
if cpb_ret:
cpb_ret.instance = self.object
cpb_ret.cpb_comprobante = self.object.id
cpb_ret.save()
for fp in cpb_fp:
if fp.cleaned_data['origen']:
origen = fp.cleaned_data['origen']
c = cpb_comprobante_fp.objects.get(id=origen)
c.mdcp_salida = fp.instance
c.save()
# estado=cpb_estado.objects.get(pk=2)
# self.object.estado=estado
cpbs.instance = self.object
c = cpb_comprobante.objects.get(id=self.object.id)
cpbs.cpb_comprobante = c
cpbs.desc_rec=0
cpbs.save()
for c in cpbs:
recalcular_saldo_cpb(c.instance.cpb_factura.pk)
limpiar_sesion(self.request)
recalcular_saldo_cpb(self.object.pk)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_compra_listado'))
def form_invalid(self, form,cpb_fp,cpbs,cpb_ret):
cpbs_pagos = self.request.session.get('cpbs_pagos', None)
entidad = None
if cpbs_pagos:
cpbs_pagos = json.loads(cpbs_pagos)
PagosCPBFormSet = inlineformset_factory(cpb_comprobante, cpb_cobranza, fk_name='cpb_comprobante',form=CPBPagoCPBForm,formset=CPBPagosCPBFormSet,extra=len(cpbs_pagos), can_delete=False,max_num=len(cpbs_pagos))
d=[]
for cpb in cpbs_pagos:
c = cpb_comprobante.objects.get(id=cpb['id_cpb_factura'])
entidad = c.entidad
d.append({'detalle_cpb': c.get_cpb_tipo,'desc_rec':'0','importe_total':cpb['importe_total'],'saldo':c.saldo,'id_cpb_factura':c.id,'cpb_factura':c})
cpbs = PagosCPBFormSet(prefix='formCPB',initial=d)
if entidad:
form.fields['entidad'].initial = entidad
return self.render_to_response(self.get_context_data(form=form,cpb_fp = cpb_fp,cpbs=cpbs,cpb_ret=cpb_ret))
@login_required
def CPBPagosSeleccionarView(request):
limpiar_sesion(request)
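    # Two modes: an AJAX POST validates the selected invoices and stashes them in
    # the session as JSON for CPBPagarCreateView to consume; a plain GET builds
    # the selection formset from the comprobante ids in the querystring.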
if request.method == 'POST' and request.is_ajax():
CPBSFormSet = formset_factory(CPBSeleccionados,extra=0)
comprobantes = CPBSFormSet(request.POST,prefix='comprobantes')
if comprobantes.is_valid():
d=[]
for c in comprobantes:
f = c.cleaned_data
d.append({'detalle_cpb':f['detalle_cpb'],'desc_rec':'0','id_cpb_factura':f['id_cpb_factura'],'importe_total':f['importe_total'],'saldo':f['saldo']})
d = json.dumps(d,default=default)
request.session['cpbs_pagos'] = d
response = {'status': 1, 'message': "Ok"} # for ok
else:
response = {'status': 0, 'message': "Verifique que los Totales no superen a los Saldos!"}
return HttpResponse(json.dumps(response,default=default), content_type='application/json')
else:
id_cpbs = request.GET.getlist('id_cpb')
cpbs = cpb_comprobante.objects.filter(id__in=id_cpbs).filter(Q(saldo__gt=0,cpb_tipo__id__in=[2,4,6,18]))
cant_cpbs = cpbs.count()
if cant_cpbs <= 0:
return HttpResponseRedirect(reverse('cpb_venta_listado'))
total=0
d=[]
for c in cpbs:
saldo = (c.saldo * c.cpb_tipo.signo_ctacte)
total += saldo
d.append({'detalle_cpb': c.get_cpb_tipo,'desc_rec':'0','importe_total':saldo,'saldo':saldo,'id_cpb_factura':c.id})
CPBSFormSet = formset_factory(CPBSeleccionados, max_num=cant_cpbs,can_delete=False)
comprobantes = CPBSFormSet(prefix='comprobantes',initial=d)
variables = RequestContext(request, {'comprobantes':comprobantes,'total':total})
return render_to_response("egresos/compras/detalle_cpbs.html", variables)
################################################################
#*********************************************************************************
class CPBRemitoDetalleFormSet(BaseInlineFormSet):
pass
CPBRemitoDetalleFS = inlineformset_factory(cpb_comprobante, cpb_comprobante_detalle,form=CPBRemitoDetalleForm,formset=CPBRemitoDetalleFormSet, can_delete=True,extra=0,min_num=1)
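# Note: CPBRemitoCCreateView.get() rebuilds this formset locally with an explicit
# fk_name, while post() reuses this module-level definition; both inject the
# request into the form class before instantiating.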
class CPBRemitoCViewList(VariablesMixin,ListView):
model = cpb_comprobante
template_name = 'egresos/remitos/cpb_remito_listado.html'
context_object_name = 'comprobantes'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
limpiar_sesion(self.request)
if not tiene_permiso(self.request,'cpb_remitosc'):
return redirect(reverse('principal'))
return super(CPBRemitoCViewList, self).dispatch(*args,**kwargs)
def get_context_data(self, **kwargs):
context = super(CPBRemitoCViewList, self).get_context_data(**kwargs)
try:
empresa = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
empresa = None
form = ConsultaCpbsCompras(self.request.POST or None,empresa=empresa,request=self.request)
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo=5,cpb_tipo__compra_venta='C',estado__in=[1,2],empresa=empresa).order_by('-fecha_cpb','-id').select_related('estado','cpb_tipo','entidad')
if form.is_valid():
entidad = form.cleaned_data['entidad']
fdesde = form.cleaned_data['fdesde']
fhasta = form.cleaned_data['fhasta']
pto_vta = form.cleaned_data['pto_vta']
vendedor = form.cleaned_data['vendedor']
estado = form.cleaned_data['estado']
if int(estado) == 1:
comprobantes = cpb_comprobante.objects.filter(cpb_tipo__tipo=5,cpb_tipo__compra_venta='C',estado__in=[1,2,3],empresa=empresa).order_by('-fecha_cpb','-id').select_related('estado','cpb_tipo','entidad')
if fdesde:
comprobantes= comprobantes.filter(Q(fecha_cpb__gte=fdesde))
if fhasta:
comprobantes= comprobantes.filter(Q(fecha_cpb__lte=fhasta))
if entidad:
comprobantes= comprobantes.filter(entidad__apellido_y_nombre__icontains=entidad)
if pto_vta:
comprobantes= comprobantes.filter(Q(pto_vta=pto_vta))
else:
cpbs= comprobantes.filter(fecha_cpb__gte=inicioMesAnt(),fecha_cpb__lte=finMes())
if len(cpbs)==0:
cpbs = comprobantes[:20]
comprobantes=cpbs
context['form'] = form
context['comprobantes'] = comprobantes
return context
def post(self, *args, **kwargs):
return self.get(*args, **kwargs)
@login_required
def CPBRemitoCDeleteView(request, id):
try:
cpb = get_object_or_404(cpb_comprobante, id=id)
if not tiene_permiso(request,'cpb_remitos'):
return redirect(reverse('principal'))
cpb.delete()
        messages.success(request, u'El Comprobante se eliminó con éxito!')
except:
messages.error(request, u'¡El Comprobante no existe/no pudo eliminarse!')
return redirect('cpb_remitoc_listado')
class CPBRemitoCCreateView(VariablesMixin,CreateView):
form_class = CPBRemitoForm
template_name = 'egresos/remitos/cpb_remito_form.html'
model = cpb_comprobante
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_remitosc_abm'):
return redirect(reverse('principal'))
return super(CPBRemitoCCreateView, self).dispatch(*args, **kwargs)
def get_initial(self):
initial = super(CPBRemitoCCreateView, self).get_initial()
initial['tipo_form'] = 'ALTA'
initial['request'] = self.request
return initial
def get_form_kwargs(self):
kwargs = super(CPBRemitoCCreateView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBRemitoDetalleFS = inlineformset_factory(cpb_comprobante, cpb_comprobante_detalle,fk_name='cpb_comprobante',form=CPBRemitoDetalleForm,formset=CPBRemitoDetalleFormSet, can_delete=True,extra=0,min_num=1)
CPBRemitoDetalleFS.form = staticmethod(curry(CPBRemitoDetalleForm,request=request))
remito_detalle = CPBRemitoDetalleFS(prefix='formDetalle')
return self.render_to_response(self.get_context_data(form=form,remito_detalle = remito_detalle))
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBRemitoDetalleFS.form = staticmethod(curry(CPBRemitoDetalleForm,request=request))
remito_detalle = CPBRemitoDetalleFS(self.request.POST,prefix='formDetalle')
if form.is_valid() and remito_detalle.is_valid():
return self.form_valid(form, remito_detalle)
else:
return self.form_invalid(form, remito_detalle)
def form_valid(self, form, remito_detalle):
self.object = form.save(commit=False)
estado=cpb_estado.objects.get(pk=1)
self.object.estado=estado
tipo=cpb_tipo.objects.get(pk=9)
self.object.empresa = empresa_actual(self.request)
self.object.usuario = usuario_actual(self.request)
self.object.letra = 'X'
self.object.fecha_imputacion=self.object.fecha_cpb
if not self.object.fecha_vto:
self.object.fecha_vto=self.object.fecha_cpb
self.object.cpb_tipo=tipo
self.object.save()
remito_detalle.instance = self.object
remito_detalle.cpb_comprobante = self.object.id
remito_detalle.save()
recalcular_saldo_cpb(self.object.pk)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_remitoc_listado'))
def form_invalid(self, form,remito_detalle):
return self.render_to_response(self.get_context_data(form=form,remito_detalle = remito_detalle))
def get_success_url(self):
return reverse('cpb_remitoc_listado')
class CPBRemitoCEditView(VariablesMixin,UpdateView):
form_class = CPBRemitoForm
template_name = 'egresos/remitos/cpb_remito_form.html'
model = cpb_comprobante
pk_url_kwarg = 'id'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'cpb_remitosc_abm'):
return redirect(reverse('principal'))
return super(CPBRemitoCEditView, self).dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super(CPBRemitoCEditView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBRemitoDetalleFS.form = staticmethod(curry(CPBRemitoDetalleForm,request=request))
remito_detalle = CPBRemitoDetalleFS(instance=self.object,prefix='formDetalle')
return self.render_to_response(self.get_context_data(form=form,remito_detalle = remito_detalle))
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
CPBRemitoDetalleFS.form = staticmethod(curry(CPBRemitoDetalleForm,request=request))
remito_detalle = CPBRemitoDetalleFS(self.request.POST,instance=self.object,prefix='formDetalle')
if form.is_valid() and remito_detalle.is_valid():
return self.form_valid(form, remito_detalle)
else:
return self.form_invalid(form, remito_detalle)
def form_invalid(self, form,remito_detalle):
return self.render_to_response(self.get_context_data(form=form,remito_detalle = remito_detalle))
def form_valid(self, form, remito_detalle):
self.object = form.save(commit=False)
self.object.fecha_imputacion=self.object.fecha_cpb
if not self.object.fecha_vto:
self.object.fecha_vto=self.object.fecha_cpb
self.object.save()
remito_detalle.instance = self.object
remito_detalle.cpb_comprobante = self.object.id
remito_detalle.save()
recalcular_saldo_cpb(self.object.pk)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return HttpResponseRedirect(reverse('cpb_remitoc_listado'))
def get_initial(self):
initial = super(CPBRemitoCEditView, self).get_initial()
initial['tipo_form'] = 'EDICION'
initial['titulo'] = 'Editar Remito '+str(self.get_object())
initial['request'] = self.request
return initial
```
#### File: IronWeb/felectronica/forms.py
```python
from django import forms
from comprobantes.models import cpb_pto_vta,cpb_tipo_forma_pago,cpb_cuenta,cpb_tipo,cpb_nro_afip
from general.utilidades import *
from general.forms import pto_vta_habilitados
from general.models import gral_empresa
_SINO = (
(1, u'S'),
(2, u'N'),
)
class ImportarCPBSForm(forms.Form):
archivo = forms.FileField(label='Seleccione un archivo',required=True)
migra = forms.ChoiceField(label=u'¿Crear CPBs Faltantes?',choices=SINO,required=True,initial=2)
empresa = forms.ModelChoiceField(queryset=gral_empresa.objects.all(),empty_label=None,required=True)
# tipo_entidad = forms.ChoiceField(label=u'Tipo Entidad',choices=TIPO_ENTIDAD,required=True,initial=1)
def __init__(self, *args, **kwargs):
request = kwargs.pop('request', None)
super(ImportarCPBSForm, self).__init__(*args, **kwargs)
try:
empresas = empresas_buscador(request)
self.fields['empresa'].queryset = empresas
except:
pass
def clean(self):
archivo = self.cleaned_data.get('archivo')
if archivo:
if not archivo.name.endswith('.csv'):
self.add_error("archivo",u'¡El archivo debe tener extensión .CSV!')
            # reject files that are too large
if archivo.multiple_chunks():
self.add_error("archivo",u"El archivo es demasiado grande (%.2f MB)." % (archivo.size/(1000*1000),))
return self.cleaned_data
class RecuperarCPBS(forms.Form):
cpb_tipo = forms.ModelChoiceField(label='Tipo CPB',queryset=cpb_nro_afip.objects.all(),required = True,empty_label=None)
pto_vta = forms.IntegerField(label='Pto. Vta.',required = True)
generar = forms.ChoiceField(label=u'¿Crear CPBs?',choices=_SINO,required=True,initial=2)
def __init__(self, *args, **kwargs):
empresa = kwargs.pop('empresa', None)
request = kwargs.pop('request', None)
super(RecuperarCPBS, self).__init__(*args, **kwargs)
# self.fields['pto_vta'].queryset = pto_vta_habilitados(request)
class ConsultaCPB(forms.Form):
cpb_tipo = forms.ModelChoiceField(label='Tipo CPB',queryset=cpb_nro_afip.objects.all(),required = True,empty_label=None)
pto_vta = forms.IntegerField(label='Pto. Vta.',required = True)
numero = forms.IntegerField(label=u'Numero CPB',required = True)
def __init__(self, *args, **kwargs):
empresa = kwargs.pop('empresa', None)
request = kwargs.pop('request', None)
super(ConsultaCPB, self).__init__(*args, **kwargs)
# self.fields['pto_vta'].queryset = pto_vta_habilitados(request)
```
#### File: IronWeb/general/views.py
```python
from django.template import RequestContext,Context
from django.shortcuts import *
from .models import *
from .utilidades import *
from django.views.generic import TemplateView,ListView,CreateView,UpdateView,FormView
from django.conf import settings
from django.db.models import Q,Sum,Count,F
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.db import connection
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response,redirect
from modal.views import AjaxCreateView,AjaxUpdateView,AjaxDeleteView
from django.contrib import messages
import json
import urllib
from .forms import EmpresaForm,TareasForm,pto_vta_habilitados,pto_vta_habilitados_list
from comprobantes.models import cpb_comprobante,cpb_comprobante_detalle
from entidades.models import egr_entidad
from productos.models import prod_productos,prod_producto_lprecios
from trabajos.models import orden_pedido,orden_trabajo
from usuarios.views import tiene_permiso,ver_permisos
from django.db.models import DecimalField,Func
from django.core.serializers.json import DjangoJSONEncoder
##############################################
# Mixin para cargar las Vars de sistema #
##############################################
def ultimoNroId(tabla):
ultimo = tabla.objects.latest('id').id
return ultimo
@login_required
def buscarDatosAPICUIT(request):
try:
cuit = request.GET['cuit']
data = urllib.urlopen(URL_API+cuit).read()
d = json.loads(data)
imp = [x['idImpuesto'] for x in d['impuesto']]
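        # Map the tax-regime ids (idImpuesto) returned by the AFIP padron API to
        # the app's internal fiscal-category codes; order matters, the first
        # matching regime wins.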
if (10 in imp):
id_cat=1
elif (11 in imp):
id_cat=1
elif (30 in imp):
id_cat=1
elif (20 in imp):
id_cat=6
elif (32 in imp):
id_cat=4
elif (33 in imp):
id_cat=2
else:
id_cat=5
d.update({'categoria': id_cat})
except:
d= []
return HttpResponse( json.dumps(d), content_type='application/json' )
@login_required
def buscarDatosEmpresa(request):
d= {}
try:
empresa = empresa_actual(request)
d['nombre']= empresa.nombre
d['categ_fiscal']= empresa.categ_fiscal
d['cuit']= empresa.cuit
d['iibb']= empresa.iibb
d['fecha_inicio_activ']= str(empresa.fecha_inicio_activ)
d['domicilio']= empresa.domicilio
d['provincia']= empresa.provincia
d['localidad']= empresa.localidad
d['cod_postal']= empresa.cod_postal
d['email']= empresa.email
d['telefono']= empresa.telefono
d['celular']= empresa.celular
d['nombre_fantasia']= empresa.nombre_fantasia
d['ruta_logo']= empresa.ruta_logo
d['tipo_logo_factura']= empresa.tipo_logo_factura
except:
pass
return HttpResponse( json.dumps(d,cls=DecimalEncoder), content_type='application/json' )
def getVariablesMixin(request):
context = {}
context['ENTIDAD_ID'] = settings.ENTIDAD_ID
context['ENTIDAD_DIR'] = settings.ENTIDAD_DIR
usr= request.user
try:
context['usuario'] = usr.userprofile.id_usuario
except:
context['usuario'] = None
try:
context['usr'] = usr
except:
context['usr'] = None
try:
empresa = usr.userprofile.id_usuario.empresa
except gral_empresa.DoesNotExist:
empresa = None
context['empresa'] = empresa
try:
tipo_usr = usr.userprofile.id_usuario.tipoUsr
context['tipo_usr'] = tipo_usr
context['habilitado_contador'] = habilitado_contador(tipo_usr)
except:
context['tipo_usr'] = 1
context['habilitado_contador'] = False
permisos_grupo = ver_permisos(request)
context['permisos_grupo'] = permisos_grupo
context['permisos_ingresos'] = ('cpb_ventas' in permisos_grupo)or('cpb_cobranzas' in permisos_grupo)or('cpb_remitos' in permisos_grupo)or('cpb_presupuestos' in permisos_grupo)or('cpb_liqprod_abm' in permisos_grupo)
context['permisos_egresos'] = ('cpb_compras' in permisos_grupo)or('cpb_pagos' in permisos_grupo)or('cpb_movimientos' in permisos_grupo)
context['permisos_trabajos'] = ('trab_pedidos' in permisos_grupo)or('trab_trabajos' in permisos_grupo)or('trab_colocacion' in permisos_grupo)
context['permisos_rep_ingr_egr'] = ('rep_cta_cte_clientes' in permisos_grupo)or('rep_saldos_clientes' in permisos_grupo)or('rep_cta_cte_prov' in permisos_grupo)or('rep_saldos_prov' in permisos_grupo)or('rep_varios' in permisos_grupo)
    context['permisos_rep_contables'] = ('rep_libro_iva' in permisos_grupo)
context['permisos_rep_finanzas'] = ('rep_caja_diaria' in permisos_grupo)or('rep_seguim_cheques' in permisos_grupo)or('rep_saldos_cuentas' in permisos_grupo)
context['permisos_entidades'] = ('ent_clientes' in permisos_grupo)or('ent_proveedores' in permisos_grupo)or('ent_vendedores' in permisos_grupo)
context['permisos_productos'] = ('prod_productos' in permisos_grupo)or('prod_productos_abm' in permisos_grupo)
context['sitio_mobile'] = mobile(request)
context['hoy'] = hoy()
context['EMAIL_CONTACTO'] = EMAIL_CONTACTO
return context
class VariablesMixin(object):
def get_context_data(self, **kwargs):
context = super(VariablesMixin, self).get_context_data(**kwargs)
context['ENTIDAD_ID'] = settings.ENTIDAD_ID
context['ENTIDAD_DIR'] = settings.ENTIDAD_DIR
usr= self.request.user
try:
context['usuario'] = usuario_actual(self.request)
except:
context['usuario'] = None
try:
context['usr'] = usr
except:
context['usr'] = None
try:
empresa = empresa_actual(self.request)
except gral_empresa.DoesNotExist:
empresa = None
context['empresa'] = empresa
context['settings'] = settings
try:
tipo_usr = usr.userprofile.id_usuario.tipoUsr
context['tipo_usr'] = tipo_usr
context['habilitado_contador'] = habilitado_contador(tipo_usr)
except:
context['tipo_usr'] = 1
context['habilitado_contador'] = False
permisos_grupo = ver_permisos(self.request)
context['permisos_grupo'] = permisos_grupo
context['permisos_ingresos'] = ('cpb_ventas' in permisos_grupo)or('cpb_cobranzas' in permisos_grupo)or('cpb_remitos' in permisos_grupo)or('cpb_presupuestos' in permisos_grupo)or('cpb_liqprod_abm' in permisos_grupo)
context['permisos_egresos'] = ('cpb_compras' in permisos_grupo)or('cpb_pagos' in permisos_grupo)or('cpb_movimientos' in permisos_grupo)
context['permisos_trabajos'] = ('trab_pedidos' in permisos_grupo)or('trab_trabajos' in permisos_grupo)or('trab_colocacion' in permisos_grupo)
context['permisos_rep_ingr_egr'] = ('rep_cta_cte_clientes' in permisos_grupo)or('rep_saldos_clientes' in permisos_grupo)or('rep_cta_cte_prov' in permisos_grupo)or('rep_saldos_prov' in permisos_grupo)or('rep_varios' in permisos_grupo)
        context['permisos_rep_contables'] = ('rep_libro_iva' in permisos_grupo)
context['permisos_entidades'] = ('ent_clientes' in permisos_grupo)or('ent_proveedores' in permisos_grupo)or('ent_vendedores' in permisos_grupo)
context['permisos_productos'] = ('prod_productos' in permisos_grupo)or('prod_productos_abm' in permisos_grupo)
context['permisos_rep_finanzas'] = ('rep_caja_diaria' in permisos_grupo)or('rep_seguim_cheques' in permisos_grupo)or('rep_saldos_cuentas' in permisos_grupo)
context['homologacion'] = empresa.homologacion
context['sitio_mobile'] = mobile(self.request)
context['hoy'] = hoy()
context['EMAIL_CONTACTO'] = EMAIL_CONTACTO
return context
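# Minimal ORM expressions wrapping SQL EXTRACT so querysets can be annotated by
# month and year, e.g. qs.annotate(m=Month('fecha_cpb'), anio=Year('fecha_cpb')).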
class Month(Func):
function = 'EXTRACT'
template = '%(function)s(MONTH from %(expressions)s)'
output_field = models.IntegerField()
class Year(Func):
function = 'EXTRACT'
template = '%(function)s(YEAR from %(expressions)s)'
output_field = models.IntegerField()
class PrincipalView(VariablesMixin,TemplateView):
template_name = 'index.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(PrincipalView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(PrincipalView, self).get_context_data(**kwargs)
usr= usuario_actual(self.request)
fecha_desde = ultimo_anio()
fecha_hoy = hoy()
pvs = pto_vta_habilitados_list(self.request)
empresas = empresas_habilitadas(self.request)
comprobantes = cpb_comprobante.objects.filter(estado__in=[1,2]).filter(fecha_cpb__range=[fecha_desde, fecha_hoy],empresa=empresa_actual(self.request))
ventas = comprobantes.filter(cpb_tipo__compra_venta='V',pto_vta__in=pvs,cpb_tipo__tipo__in=[1,2,3,9,21,22,23])
total_ventas_mensual = ventas.filter(fecha_cpb__range=[inicioMes(), fecha_hoy])
total_ventas_mensual = total_ventas_mensual.aggregate(sum=Sum(F('importe_total')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
total_ventas = ventas.aggregate(sum=Sum(F('importe_total')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
context['total_ventas'] = total_ventas
context['total_ventas_mensual'] = total_ventas_mensual
deuda_cobrar_total = ventas.aggregate(sum=Sum(F('saldo')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
deuda_cobrar_mensual = ventas.filter(fecha_cpb__range=[inicioMes(), fecha_hoy]).aggregate(sum=Sum(F('saldo')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
context['deuda_cobrar_total'] = deuda_cobrar_total
context['deuda_cobrar_mensual'] = deuda_cobrar_mensual
porc_cobrar_total = 0
porc_cobrar_mensual = 0
if total_ventas > 0:
porc_cobrar_total=(deuda_cobrar_total/total_ventas)*100
if total_ventas_mensual > 0:
porc_cobrar_mensual=(deuda_cobrar_mensual/total_ventas_mensual)*100
context['porc_cobrar_mensual'] = porc_cobrar_mensual
context['porc_cobrar_total'] = porc_cobrar_total
compras = comprobantes.filter(cpb_tipo__compra_venta='C',cpb_tipo__tipo__in=[1,2,3,9,21,22,23])
total_compras = compras.aggregate(sum=Sum(F('importe_total')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
context['total_compras'] = total_compras
total_compras_mensual = compras.filter(fecha_cpb__range=[inicioMes(), fecha_hoy]).aggregate(sum=Sum(F('importe_total')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
context['total_compras_mensual'] = total_compras_mensual
deuda_pagar_total = compras.aggregate(sum=Sum(F('saldo')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
deuda_pagar_mensual = compras.filter(fecha_cpb__range=[inicioMes(), fecha_hoy]).aggregate(sum=Sum(F('saldo')*F('cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
context['deuda_pagar_total'] = deuda_pagar_total
context['deuda_pagar_mensual'] = deuda_pagar_mensual
porc_pagar_total = 0
porc_pagar_mensual = 0
if total_compras > 0:
porc_pagar_total=(deuda_pagar_total/total_compras)*100
if total_compras_mensual > 0:
porc_pagar_mensual=(deuda_pagar_mensual/total_compras_mensual)*100
context['porc_pagar_total'] = porc_pagar_total
context['porc_pagar_mensual'] = porc_pagar_mensual
context['ultimas_ventas'] = ventas.filter(cpb_tipo__id__in=[1,3,5,14]).order_by('-fecha_cpb','-fecha_creacion','-id').select_related('entidad','cpb_tipo','estado')[:10]
context['ultimas_compras'] = compras.filter(cpb_tipo__id__in=[2,4,6,18],estado__in=[1,2]).order_by('-fecha_cpb','-fecha_creacion','-id').select_related('entidad','cpb_tipo','estado')[:10]
# context['ultimos_presup'] = comprobantes.filter(cpb_tipo__id=11).order_by('-fecha_cpb','-fecha_creacion','-id').select_related('entidad','cpb_tipo','estado','presup_aprobacion','presup_aprobacion')[:10]
if usr.tipoUsr==0:
context['tareas'] = gral_tareas.objects.filter(empresa__id__in=empresas).select_related('usuario_creador','usuario_asignado').order_by('-fecha','-fecha_creacion','-id')
else:
context['tareas'] = gral_tareas.objects.filter(empresa__id__in=empresas).filter(Q(usuario_asignado=usr)|Q(usuario_asignado__isnull=True)).select_related('usuario_creador','usuario_asignado').order_by('-fecha','-fecha_creacion','-id')
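        # Build the dashboard chart series: for each (month, year) present in the
        # base queryset, aggregate pending vs settled amounts for sales and for
        # purchases, pushing one data point per month onto each list.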
comprobantes = comprobantes.filter(cpb_tipo__tipo__in=[1,2,3,9,21,22,23]).distinct().annotate(m=Month('fecha_cpb'),anio=Year('fecha_cpb')).order_by(F('anio'),F('m')).values('m','anio')
meses_cpbs = comprobantes.values_list('m','anio')
meses = list()
import locale
locale.setlocale(locale.LC_ALL, '')
ventas_deuda = list()
ventas_pagos = list()
compras_deuda = list()
compras_pagos = list()
for m in meses_cpbs:
meses.append(MESES[m[0]-1][1]+' '+str(m[1])[2:4]+"'")
ventas = comprobantes.filter(cpb_tipo__compra_venta='V',anio=m[1],m=m[0]).annotate(pendiente=Sum(F('saldo')*F('cpb_tipo__signo_ctacte'),output_field=DecimalField()),saldado=Sum((F('importe_total')-F('saldo'))*F('cpb_tipo__signo_ctacte'),output_field=DecimalField())).order_by(F('anio'),F('m'))
compras = comprobantes.filter(cpb_tipo__compra_venta='C',anio=m[1],m=m[0]).annotate(pendiente=Sum(F('saldo')*F('cpb_tipo__signo_ctacte'),output_field=DecimalField()),saldado=Sum((F('importe_total')-F('saldo'))*F('cpb_tipo__signo_ctacte'),output_field=DecimalField())).order_by(F('anio'),F('m'))
if ventas:
ventas_deuda.append(ventas[0].get('pendiente',Decimal(0.00)))
ventas_pagos.append(ventas[0].get('saldado',Decimal(0.00)))
else:
ventas_deuda.append(Decimal(0.00))
ventas_pagos.append(Decimal(0.00))
if compras:
compras_deuda.append(compras[0].get('pendiente',Decimal(0.00)))
compras_pagos.append(compras[0].get('saldado',Decimal(0.00)))
else:
compras_deuda.append(Decimal(0.00))
compras_pagos.append(Decimal(0.00))
context['meses']= json.dumps(meses,cls=DecimalEncoder)
context['ventas_deuda']= json.dumps(ventas_deuda,cls=DecimalEncoder)
context['ventas_pagos']= json.dumps(ventas_pagos,cls=DecimalEncoder)
context['compras_deuda']= json.dumps(compras_deuda,cls=DecimalEncoder)
context['compras_pagos']= json.dumps(compras_pagos,cls=DecimalEncoder)
context['hoy'] = fecha_hoy
context['fecha_desde'] = fecha_desde
productos_vendidos = cpb_comprobante_detalle.objects.filter(cpb_comprobante__pto_vta__in=pvs,cpb_comprobante__cpb_tipo__compra_venta='V',cpb_comprobante__cpb_tipo__tipo__in=[1,2,3,9,21,22,23],cpb_comprobante__estado__in=[1,2],cpb_comprobante__fecha_cpb__range=[fecha_desde, fecha_hoy])
productos_vendidos_total = productos_vendidos.aggregate(sum=Sum(F('importe_total')*F('cpb_comprobante__cpb_tipo__signo_ctacte'), output_field=DecimalField()))['sum'] or 0
productos_vendidos = productos_vendidos.values('producto__nombre').annotate(tot=Sum(F('importe_total')*F('cpb_comprobante__cpb_tipo__signo_ctacte'),output_field=DecimalField())).order_by('-tot')[:10]
context['productos_vendidos']= productos_vendidos
vars_sistema = settings
return context
class EmpresaView(VariablesMixin,ListView):
model = gral_empresa
template_name = 'general/empresas/empresas_listado.html'
context_object_name = 'empresas'
queryset = gral_empresa.objects.filter().order_by('id')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
limpiar_sesion(self.request)
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(EmpresaView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(EmpresaView, self).get_context_data(**kwargs)
return context
class EmpresaEditView(VariablesMixin,UpdateView):
form_class = EmpresaForm
model = gral_empresa
pk_url_kwarg = 'id'
template_name = 'general/empresas/empresa_form.html'
success_url = '/'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_configuracion'):
return redirect(reverse('principal'))
return super(EmpresaEditView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(EmpresaEditView, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(EmpresaEditView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_initial(self):
initial = super(EmpresaEditView, self).get_initial()
initial['request'] = self.request
return initial
#************* TAREAS **************
class TareasView(VariablesMixin,ListView):
model = gral_tareas
template_name = 'general/tareas/tareas_listado.html'
context_object_name = 'tareas'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
limpiar_sesion(self.request)
if not tiene_permiso(self.request,'gral_tareas'):
return redirect(reverse('principal'))
return super(TareasView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(TareasView, self).get_context_data(**kwargs)
try:
tareas = gral_tareas.objects.filter(empresa__id__in=empresas_habilitadas(self.request)).select_related('usuario_creador','usuario_asignado').order_by('-fecha','-fecha_creacion','-id')
context['tareas'] = tareas
except:
context['tareas'] = None
return context
class TareasCreateView(VariablesMixin,CreateView):
form_class = TareasForm
template_name = 'general/tareas/tareas_form.html'
success_url = '/tareas/'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_tareas'):
return redirect(reverse('tareas_listado'))
return super(TareasCreateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
# form.instance.empresa = self.request.user.userprofile.id_usuario.empresa
form.instance.usuario_creador = usuario_actual(self.request)
form.instance.empresa = empresa_actual(self.request)
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(TareasCreateView, self).form_valid(form)
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_initial(self):
initial = super(TareasCreateView, self).get_initial()
return initial
class TareasEditView(VariablesMixin,UpdateView):
form_class = TareasForm
model = gral_tareas
pk_url_kwarg = 'id'
template_name = 'general/tareas/tareas_form.html'
success_url = '/tareas/'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if not tiene_permiso(self.request,'gral_tareas'):
return redirect(reverse('tareas_listado'))
return super(TareasEditView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
messages.success(self.request, u'Los datos se guardaron con éxito!')
return super(TareasEditView, self).form_valid(form)
def get_initial(self):
initial = super(TareasEditView, self).get_initial()
return initial
@login_required
def TareasDeleteView(request, id):
t = get_object_or_404(gral_tareas, id=id)
if not tiene_permiso(request,'gral_tareas'):
return redirect(reverse('tareas_listado'))
t.delete()
    messages.success(request, u'La tarea se eliminó con éxito!')
return redirect('tareas_listado')
#***************************
@login_required
def recargar_clientes(request):
context={}
clientes = egr_entidad.objects.filter(tipo_entidad=1,baja=False,empresa__id__in=empresas_habilitadas(request)).distinct().order_by('apellido_y_nombre')
context["clientes"]=[{'detalle':p.__unicode__(),'id':p.pk} for p in clientes]
return HttpResponse(json.dumps(context))
@login_required
def recargar_vendedores(request):
context={}
vendedores = egr_entidad.objects.filter(tipo_entidad=3,baja=False,empresa__id__in=empresas_habilitadas(request)).distinct().order_by('apellido_y_nombre')
context["vendedores"]=[{'detalle':p.__unicode__(),'id':p.pk} for p in vendedores]
return HttpResponse(json.dumps(context))
@login_required
def recargar_proveedores(request):
context={}
proveedores = egr_entidad.objects.filter(tipo_entidad=2,baja=False,empresa__id__in=empresas_habilitadas(request)).distinct().order_by('apellido_y_nombre')
context["proveedores"]=[{'detalle':p.__unicode__(),'id':p.pk} for p in proveedores]
return HttpResponse(json.dumps(context))
@login_required
def recargar_productos(request,tipo):
context={}
productos = prod_productos.objects.filter(baja=False,mostrar_en__in=(tipo,3),empresa__id__in=empresas_habilitadas(request)).distinct().order_by('nombre','codigo')
prods = [{'detalle':p.get_prod_busqueda(),'id':p.pk} for p in productos]
context["productos"]= prods
return HttpResponse(json.dumps(context))
@login_required
def entidad_baja_reactivar(request,id):
entidad = egr_entidad.objects.get(pk=id)
entidad.baja = not entidad.baja
entidad.save()
return HttpResponseRedirect(entidad.get_listado())
from django.http import HttpResponse
from PIL import Image
def chequear_email(request,id):
try:
cpb=cpb_comprobante.objects.get(pk=id)
if cpb.fecha_envio_mail:
cpb.fecha_recepcion_mail = date.today()
cpb.save()
red = Image.new('RGB', (1, 1))
response = HttpResponse(content_type="image/png")
red.save(response, "PNG")
return response
except:
        return HttpResponse('ERROR')
def codbar(request):
cod = "272211991410600037040117046218920201016"
dv1= str(digVerificador(cod))
return HttpResponse(dv1)
```
#### File: IronWeb/ggcontable/wsgi_labartoladeco.py
```python
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.abspath(os.path.join(BASE_DIR, '..'))
sys.path.append(PROJECT_DIR)
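# Per-tenant WSGI entry point: each deployment pins its settings module and the
# ENTIDAD_* environment variables (tenant id, database and media directory)
# before the Django application is created.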
os.environ['DJANGO_SETTINGS_MODULE'] = "ggcontable.opal"
os.environ['ENTIDAD_ID'] = '1'
os.environ['ENTIDAD_DB'] = 'ironweb_labartoladeco'
os.environ['ENTIDAD_DIR'] = 'labartoladeco'
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# try:
# _application = None
# def application(environ, start_response):
# global _application
# if _application == None:
# os.environ['ENTIDAD_ID'] = environ['ENTIDAD_ID']
# os.environ['ENTIDAD_DB'] = environ['ENTIDAD_DB']
# os.environ['ENTIDAD_DIR'] = environ['ENTIDAD_DIR']
# _application = get_wsgi_application()
# return _application(environ, start_response)
# except Exception as e:
# logger.error('Admin Command Error: %s', ' '.join(sys.argv), exc_info=sys.exc_info())
# raise e
```
#### File: jmjacquet/IronWeb/log_analizer.py
```python
import re
#[23/Feb/2021 09:16:39] ERROR [django.request:256] Internal Server Error:
import pprint
def encabezado(text):
#return re.match(r'\[\d{1,2}/\w{1,3}/\d{1,4} \d{1,2}:\d{1,2}:\d{1,2}\]',text)
return re.match(r'\[\d{1,2}/Feb/\d{1,4} \d{1,2}:\d{1,2}:\d{1,2}\]',text)
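# Group traceback lines under the most recent "Internal Server Error" header and
# print the last three lines of each traceback (usually the exception itself).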
def log_parser():
try:
dicc = {}
ruta = ''
with open('errores.log') as in_file:
for line in in_file:
if encabezado(line):
fecha,ruta = line.split("ERROR [django.request:256] Internal Server Error:")
dicc[ruta] = []
else:
dicc[ruta].append(line)
pprint.pprint([dicc[x][-3:] for x in dicc])
except Exception as e:
print e
if __name__ == '__main__':
log_parser()
```
#### File: IronWeb/pyafipws/pyrece.py
```python
"Aplicativo AdHoc Para generación de Facturas Electrónicas"
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright (C) 2009-2017 <NAME>"
__license__ = "GPL 3.0"
__version__ = "1.31a"
from datetime import datetime
from decimal import Decimal, getcontext, ROUND_DOWN
import os
import sys
import wx
import gui
import unicodedata
import traceback
from ConfigParser import SafeConfigParser
import wsaa, wsfev1, wsfexv1
from utils import SimpleXMLElement, SoapClient, SoapFault, date
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from smtplib import SMTP
#from PyFPDF.ejemplos.form import Form
from pyfepdf import FEPDF
# Formatos de archivos:
from formatos import formato_xml, formato_csv, formato_dbf, formato_txt, formato_json
try:
from numeros import conv_text
except:
conv_text = lambda num: str(num)
HOMO = False
DEBUG = '--debug' in sys.argv
CONFIG_FILE = "rece.ini"
ACERCA_DE = u"""
PyRece: Aplicativo AdHoc para generar Facturas Electrónicas
Copyright (C) 2008-2015 <NAME> <EMAIL>
Este programa es software libre, se entrega ABSOLUTAMENTE SIN GARANTIA
y es bienvenido a redistribuirlo bajo la licencia GPLv3.
Para información adicional y descargas ver:
http://www.sistemasagiles.com.ar/
"""
INSTRUCTIVO = U"""
Forma de uso:
* Examinar: para buscar el archivo a procesar (opcional)
* Cargar: para leer los datos del archivo de facturas a procesar
* Autenticar: para iniciar la sesión en los servidores de AFIP (obligatorio antes de autorizar)
* Marcar Todo: para seleccionar todas las facturas
* Autorizar: para autorizar las facturas seleccionadas, completando el CAE y demás datos
* Autorizar Lote: para autorizar en un solo lote las facturas seleccionadas
* Grabar: para almacenar los datos procesados en el archivo de facturas
* Previsualizar: para ver por pantalla la factura seleccionadas
* Enviar: para enviar por correo electrónico las facturas seleccionadas
Para solicitar soporte comercial, escriba a <EMAIL>
"""
class PyRece(gui.Controller):
def on_load(self, event):
self.cols = []
self.items = []
self.paths = [entrada]
self.token = self.sign = ""
self.smtp = None
self.webservice = None
if entrada and os.path.exists(entrada):
self.cargar()
self.components.cboWebservice.value = DEFAULT_WEBSERVICE
self.on_cboWebservice_click(event)
self.tipos = {
1:u"Factura A",
2:u"Notas de Débito A",
3:u"Notas de Crédito A",
4:u"Recibos A",
5:u"Notas de Venta al contado A",
6:u"Facturas B",
7:u"Notas de Débito B",
8:u"Notas de Crédito B",
9:u"Recibos B",
10:u"Notas de Venta al contado B",
19:u"Facturas de Exportación",
20:u"Nota de Débito por Operaciones con el Exterior",
21:u"Nota de Crédito por Operaciones con el Exterior",
39:u"Otros comprobantes A que cumplan con la R.G. N° 3419",
40:u"Otros comprobantes B que cumplan con la R.G. N° 3419",
60:u"Cuenta de Venta y Líquido producto A",
61:u"Cuenta de Venta y Líquido producto B",
63:u"Liquidación A",
64:u"Liquidación B",
11:u"Factura C",
12:u"Nota de Débito C",
13:u"Nota de Crédito C",
15:u"Recibo C",
}
self.component.bgcolor = "light gray"
# deshabilito ordenar
##self.components.lvwListado.GetColumnSorter = lambda: lambda x,y: 0
def set_cols(self, cols):
self.__cols = cols
lv = self.components.lvwListado
# remove old columns:
lv.clear_all()
# insert new columns
for col in cols:
ch = gui.ListColumn(lv, name=col, text=col.replace("_"," ").title(), align="left")
def get_cols(self):
return self.__cols
cols = property(get_cols, set_cols)
def set_items(self, items):
cols = self.cols
self.__items = items
def convert_str(value):
if value is None:
return ''
elif isinstance(value, str):
return unicode(value,'latin1')
elif isinstance(value, unicode):
return value
else:
return str(value)
self.components.lvwListado.items = [[convert_str(item[col]) for col in cols] for item in items]
wx.SafeYield()
def get_items(self):
return self.__items
items = property(get_items, set_items)
def get_selected_items(self):
for it in self.components.lvwListado.get_selected_items():
yield it.index, it
def set_selected_items(self, selected):
for it in selected:
it.selected = True
def set_paths(self, paths):
self.__paths = paths
self.components.txtArchivo.value = ', '.join([fn for fn in paths])
def get_paths(self):
return self.__paths
paths = property(get_paths, set_paths)
def log(self, msg):
if not isinstance(msg, unicode):
msg = unicode(msg, "latin1","ignore")
print "LOG", msg
self.components.txtEstado.value = msg + u"\n" + self.components.txtEstado.value
wx.SafeYield()
f = None
try:
f = open("pyrece.log","a")
f.write("%s: " % (datetime.now(), ))
f.write(msg.encode("ascii", "ignore"))
f.write("\n\r")
except Exception, e:
print e
finally:
if f:
f.close()
def progreso(self, value):
if self.items:
per = (value+1)/float(len(self.items))*100
self.components.pbProgreso.value = per
wx.SafeYield()
def error(self, code, text):
ex = traceback.format_exception( sys.exc_type, sys.exc_value, sys.exc_traceback)
self.log(''.join(ex))
gui.alert(text, 'Error %s' % code)
def verifica_ws(self):
if not self.ws:
gui.alert("Debe seleccionar el webservice a utilizar!", 'Advertencia')
raise RuntimeError()
if not self.token or not self.sign:
gui.alert("Debe autenticarse con AFIP!", 'Advertencia')
raise RuntimeError()
self.ws.Dummy()
def on_btnMarcarTodo_click(self, event):
for it in self.components.lvwListado.items:
it.selected = True
def on_menu_consultas_dummy_click(self, event):
##self.verifica_ws()
try:
if self.webservice in ("wsfev1", "wsfexv1"):
self.ws.Dummy()
msg = "AppServ %s\nDbServer %s\nAuthServer %s" % (
self.ws.AppServerStatus, self.ws.DbServerStatus, self.ws.AuthServerStatus)
location = self.ws.client.location
else:
msg = "%s no soportado" % self.webservice
location = ""
gui.alert(msg, location)
except Exception, e:
self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
def on_menu_consultas_lastCBTE_click(self, event):
##self.verifica_ws()
options = [v for k,v in sorted([(k,v) for k,v in self.tipos.items()])]
result = gui.single_choice(options, "Tipo de comprobante",
u"Consulta Último Nro. Comprobante",
)
if not result:
return
tipocbte = [k for k,v in self.tipos.items() if v==result][0]
result = gui.prompt(u"Punto de venta",
u"Consulta Último Nro. Comprobante", '2')
if not result:
return
ptovta = result
try:
if self.webservice=="wsfev1":
ultcmp = "%s (wsfev1)" % self.ws.CompUltimoAutorizado(tipocbte, ptovta)
elif self.webservice=="wsfexv1":
ultcmp = "%s (wsfexv1)" % self.ws.GetLastCMP(tipocbte, ptovta)
gui.alert(u"Último comprobante: %s\n"
u"Tipo: %s (%s)\nPunto de Venta: %s" % (ultcmp, self.tipos[tipocbte],
tipocbte, ptovta), u'Consulta Último Nro. Comprobante')
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
def on_menu_consultas_getCAE_click(self, event):
self.verifica_ws()
options = [v for k,v in sorted([(k,v) for k,v in self.tipos.items()])]
result = gui.single_choice(options, "Tipo de comprobante",
u"Consulta Comprobante",
)
if not result:
return
tipocbte = [k for k,v in self.tipos.items() if v==result][0]
result = gui.prompt(u"Punto de venta",
u"Consulta Comprobante", '2')
if not result:
return
ptovta = result
result = gui.prompt(u"Nº de comprobante",
u"Consulta Comprobante", '2')
if not result:
return
nrocbte = result
try:
if self.webservice=="wsfe":
cae = 'no soportado'
elif self.webservice=="wsfev1":
cae = "%s (wsfev1)" % self.ws.CompConsultar(tipocbte, ptovta, nrocbte)
self.log('CAE: %s' % self.ws.CAE)
self.log('FechaCbte: %s' % self.ws.FechaCbte)
self.log('PuntoVenta: %s' % self.ws.PuntoVenta)
self.log('CbteNro: %s' % self.ws.CbteNro)
self.log('ImpTotal: %s' % self.ws.ImpTotal)
self.log('ImpNeto: %s' % self.ws.ImpNeto)
self.log('ImptoLiq: %s' % self.ws.ImptoLiq)
self.log('EmisionTipo: %s' % self.ws.EmisionTipo)
elif self.webservice=="wsfexv1":
cae = "%s (wsfexv1)" % self.ws.GetCMP(tipocbte, ptovta, nrocbte)
self.log('CAE: %s' % self.ws.CAE)
self.log('FechaCbte: %s' % self.ws.FechaCbte)
self.log('PuntoVenta: %s' % self.ws.PuntoVenta)
self.log('CbteNro: %s' % self.ws.CbteNro)
self.log('ImpTotal: %s' % self.ws.ImpTotal)
gui.alert(u"CAE: %s\n"
u"Tipo: %s (%s)\nPunto de Venta: %s\nNumero: %s\nFecha: %s" % (
cae, self.tipos[tipocbte],
tipocbte, ptovta, nrocbte, self.ws.FechaCbte),
u'Consulta Comprobante')
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
def on_menu_consultas_lastID_click(self, event):
##self.verifica_ws()
try:
if self.webservice=="wsfexv1":
ultnro = self.ws.GetLastID()
else:
ultnro = None
gui.alert(u"Último ID (máximo): %s" % (ultnro),
u'Consulta Último ID')
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(e))
def on_menu_ayuda_acercade_click(self, event):
text = ACERCA_DE
gui.alert(text, u'Acerca de PyRece Versión %s' % __version__)
def on_menu_ayuda_instructivo_click(self, event):
text = INSTRUCTIVO
gui.alert(text, u'Instructivo de PyRece')
def on_menu_ayuda_limpiar_click(self, event):
self.components.txtEstado.value = ""
def on_menu_ayuda_mensajesXML_click(self, event):
self.verifica_ws()
self.components.txtEstado.value = u"XmlRequest:\n%s\n\nXmlResponse:\n%s" % (
self.ws.xml_request, self.ws.xml_response)
self.component.size = (592, 517)
def on_menu_ayuda_estado_click(self, event):
if self.component.size[1]<517:
self.component.size = (592, 517)
else:
self.component.size = (592, 265)
def on_menu_ayuda_configuracion_click(self, event):
self.components.txtEstado.value = open(CONFIG_FILE).read()
self.component.size = (592, 517)
def on_cboWebservice_click(self, event):
self.webservice = self.components.cboWebservice.value
self.ws = None
self.token = None
self.sign = None
if self.webservice == "wsfev1":
self.ws = wsfev1.WSFEv1()
elif self.webservice == "wsfexv1":
self.ws = wsfexv1.WSFEXv1()
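    # WSAA authentication flow: build an access ticket request (TRA), sign it into
    # a CMS with the certificate and private key, submit it via LoginCMS, then
    # copy the returned Token/Sign credentials into the billing webservice.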
def on_btnAutenticar_click(self, event):
try:
if self.webservice in ('wsfe', ):
service = "wsfe"
elif self.webservice in ('wsfev1', ):
self.log("Conectando WSFEv1... " + wsfev1_url)
self.ws.Conectar("",wsfev1_url, proxy_dict, timeout=60, cacert=CACERT, wrapper=WRAPPER)
self.ws.Cuit = cuit
service = "wsfe"
elif self.webservice in ('wsfex', 'wsfexv1'):
self.log("Conectando WSFEXv1... " + wsfexv1_url)
self.ws.Conectar("",wsfexv1_url, proxy_dict, cacert=CACERT, wrapper=WRAPPER)
self.ws.Cuit = cuit
service = "wsfex"
else:
gui.alert('Debe seleccionar servicio web!', 'Advertencia')
return
self.log("Creando TRA %s ..." % service)
ws = wsaa.WSAA()
tra = ws.CreateTRA(service)
self.log("Frimando TRA (CMS) con %s %s..." % (str(cert),str(privatekey)))
cms = ws.SignTRA(str(tra),str(cert),str(privatekey))
self.log("Llamando a WSAA... " + wsaa_url)
ws.Conectar("", wsdl=wsaa_url, proxy=proxy_dict, cacert=CACERT, wrapper=WRAPPER)
self.log("Proxy: %s" % proxy_dict)
xml = ws.LoginCMS(str(cms))
self.log("Procesando respuesta...")
if xml:
self.token = ws.Token
self.sign = ws.Sign
if DEBUG:
self.log("Token: %s" % self.token)
self.log("Sign: %s" % self.sign)
elif self.token and self.sign:
self.log("Token: %s... OK" % self.token[:10])
self.log("Sign: %s... OK" % self.sign[:10])
if self.webservice in ("wsfev1", "wsfexv1"):
self.ws.Token = self.token
self.ws.Sign = self.sign
if xml:
gui.alert('Autenticado OK!', 'Advertencia')
else:
gui.alert(u'Respuesta: %s' % ws.XmlResponse, u'No se pudo autenticar: %s' % ws.Excepcion)
except SoapFault,e:
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(e))
def examinar(self):
filename = entrada
wildcard = ["Planillas Excel (*.xlsx)|*.xlsx",
"Archivos CSV (*.csv)|*.csv",
"Archivos XML (*.xml)|*.xml",
"Archivos TXT (*.txt)|*.txt",
"Archivos DBF (*.dbf)|*.dbf",
"Archivos JSON (*.json)|*.json",
]
if entrada.endswith("xml"):
wildcard.sort(reverse=True)
result = gui.open_file('Abrir', 'datos', filename, '|'.join(wildcard))
if not result:
return
self.paths = [result]
def on_menu_archivo_abrir_click(self, event):
self.examinar()
self.cargar()
def on_menu_archivo_cargar_click(self, event):
self.cargar()
def cargar(self):
try:
items = []
for fn in self.paths:
if fn.lower().endswith(".csv") or fn.lower().endswith(".xlsx"):
filas = formato_csv.leer(fn)
items.extend(filas)
elif fn.lower().endswith(".xml"):
regs = formato_xml.leer(fn)
items.extend(formato_csv.aplanar(regs))
elif fn.lower().endswith(".txt"):
regs = formato_txt.leer(fn)
items.extend(formato_csv.aplanar(regs))
elif fn.lower().endswith(".dbf"):
reg = formato_dbf.leer(conf_dbf, carpeta=os.path.dirname(fn))
items.extend(formato_csv.aplanar(reg.values()))
elif fn.lower().endswith(".json"):
regs = formato_json.leer(fn)
items.extend(formato_csv.aplanar(regs))
else:
                    self.error(u'Formato de archivo desconocido', unicode(fn))
if len(items) < 2:
gui.alert(u'El archivo no tiene datos válidos', 'Advertencia')
# extraer los nombres de columnas (ignorar vacios de XLSX)
cols = items and [str(it).strip() for it in items[0] if it] or []
if DEBUG: print "Cols",cols
# armar diccionario por cada linea
items = [dict([(col,item[i]) for i, col in enumerate(cols)])
for item in items[1:]]
self.cols = cols
self.items = items
except Exception,e:
self.error(u'Excepción',unicode(e))
##raise
def on_menu_archivo_guardar_click(self, event):
filename = entrada
wildcard = ["Archivos CSV (*.csv)|*.csv", "Archivos XML (*.xml)|*.xml",
"Archivos TXT (*.txt)|*.txt", "Archivos DBF (*.dbf)|*.dbf",
"Archivos JSON (*.json)|*.json",
"Planillas Excel (*.xlsx)|*.xlsx",
]
if entrada.endswith("xml"):
wildcard.sort(reverse=True)
if self.paths:
path = self.paths[0]
else:
path = salida
result = gui.save_file(title='Guardar', filename=path,
wildcard='|'.join(wildcard))
if not result:
return
fn = result[0]
self.grabar(fn)
def grabar(self, fn=None):
try:
if fn is None and salida:
if salida.startswith("-") and self.paths:
fn = os.path.splitext(self.paths[0])[0] + salida
else:
fn = salida
elif not fn:
raise RuntimeError("Debe indicar un nombre de archivo para grabar")
if fn.lower().endswith(".csv") or fn.lower().endswith(".xlsx"):
formato_csv.escribir([self.cols] + [[item[k] for k in self.cols] for item in self.items], fn)
else:
regs = formato_csv.desaplanar([self.cols] + [[item[k] for k in self.cols] for item in self.items])
if fn.endswith(".xml"):
formato_xml.escribir(regs, fn)
elif fn.endswith(".txt"):
formato_txt.escribir(regs, fn)
elif fn.endswith(".dbf"):
formato_dbf.escribir(regs, conf_dbf, carpeta=os.path.dirname(fn))
elif fn.endswith(".json"):
formato_json.escribir(regs, fn)
else:
self.error(u'Formato de archivo desconocido', unicode(fn))
gui.alert(u'Se guardó con éxito el archivo:\n%s' % (unicode(fn),), 'Guardar')
except Exception, e:
self.error(u'Excepción',unicode(e))
def on_btnAutorizar_click(self, event):
self.verifica_ws()
try:
ok = procesadas = rechazadas = 0
cols = self.cols
items = []
self.progreso(0)
selected = []
for i, item in self.get_selected_items():
kargs = item.copy()
selected.append(item)
kargs['cbt_desde'] = kargs['cbt_hasta'] = kargs['cbt_numero']
for key in kargs:
if isinstance(kargs[key], basestring):
kargs[key] = kargs[key].replace(",",".")
if self.webservice == 'wsfev1':
encabezado = {}
for k in ('concepto', 'tipo_doc', 'nro_doc', 'tipo_cbte', 'punto_vta',
'cbt_desde', 'cbt_hasta', 'imp_total', 'imp_tot_conc', 'imp_neto',
'imp_iva', 'imp_trib', 'imp_op_ex', 'fecha_cbte',
'moneda_id', 'moneda_ctz'):
encabezado[k] = kargs[k]
for k in ('fecha_venc_pago', 'fecha_serv_desde', 'fecha_serv_hasta'):
if k in kargs:
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
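# Repeating detail columns use templated names: 'tributo_%%s_%s' % l first
# fills in the row number (yielding e.g. 'tributo_%s_2') and the remaining
# '%s' is then filled with the field name ('id', 'desc', ...), matching flat
# CSV columns like 'tributo_id_2'. Each loop stops at the first missing row.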
for l in range(1,1000):
k = 'tributo_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
desc = kargs[k % 'desc']
base_imp = kargs[k % 'base_imp']
alic = kargs[k % 'alic']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarTributo(id, desc, base_imp, alic, importe)
else:
break
for l in range(1,1000):
k = 'iva_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
base_imp = kargs[k % 'base_imp']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarIva(id, base_imp, importe)
else:
break
for l in range(1,1000):
k = 'cbte_asoc_%%s_%s' % l
if (k % 'tipo') in kargs:
tipo = kargs[k % 'tipo']
pto_vta = kargs[k % 'pto_vta']
nro = kargs[k % 'nro']
if tipo:
self.ws.AgregarCmpAsoc(tipo, pto_vta, nro)
else:
break
for l in range(1,1000):
k = 'opcional_%%s_%s' % l
if (k % 'id') in kargs:
op_id = kargs[k % 'id']
valor = kargs[k % 'valor']
if op_id:
self.ws.AgregarOpcional(op_id, valor)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.CAESolicitar()
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
gui.alert(self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
gui.alert(self.ws.Obs, u"Observación AFIP")
elif self.webservice == 'wsfexv1':
kargs['cbte_nro'] = kargs['cbt_numero']
kargs['permiso_existente'] = kargs['permiso_existente'] or ""
encabezado = {}
for k in ('tipo_cbte', 'punto_vta', 'cbte_nro', 'fecha_cbte',
'imp_total', 'tipo_expo', 'permiso_existente', 'pais_dst_cmp',
'nombre_cliente', 'cuit_pais_cliente', 'domicilio_cliente',
'id_impositivo', 'moneda_id', 'moneda_ctz',
'obs_comerciales', 'obs_generales', 'forma_pago', 'incoterms',
'idioma_cbte', 'incoterms_ds'):
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
for l in range(1,1000):
k = 'codigo%s' % l
if k in kargs:
codigo = kargs['codigo%s' % l]
ds = kargs['descripcion%s' % l]
qty = kargs['cantidad%s' % l]
umed = kargs['umed%s' % l]
precio = kargs['precio%s' % l]
importe = kargs['importe%s' % l]
bonif = kargs.get('bonif%s' % l)
self.ws.AgregarItem(codigo, ds, qty, umed, precio, importe, bonif)
else:
break
for l in range(1,1000):
k = 'cbte_asoc_%%s_%s' % l
if (k % 'tipo') in kargs:
tipo = kargs[k % 'tipo']
pto_vta = kargs[k % 'pto_vta']
nro = kargs[k % 'nro']
if tipo:
self.ws.AgregarCmpAsoc(tipo, pto_vta, nro)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.Authorize(kargs['id'])
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
gui.alert(self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
gui.alert(self.ws.Obs, u"Observación AFIP")
# actualizo la factura
for k in ('cae', 'fecha_vto', 'resultado', 'motivo', 'reproceso', 'err_code', 'err_msg'):
if kargs.get(k):
item[k] = kargs[k] if kargs[k] is not None else ""
self.items[i] = item
self.log(u"ID: %s CAE: %s Motivo: %s Reproceso: %s" % (kargs['id'], kargs['cae'], kargs['motivo'],kargs['reproceso']))
procesadas += 1
if kargs['resultado'] == "R":
rechazadas += 1
elif kargs['resultado'] == "A":
ok += 1
self.progreso(i)
self.items = self.items # reasignar para refrescar la grilla
self.set_selected_items(selected)
self.progreso(len(self.items) - 1)
gui.alert(u'Proceso finalizado, procesadas %d\n\n'
'Aceptadas: %d\n'
'Rechazadas: %d' % (procesadas, ok, rechazadas),
u'Autorización')
self.grabar()
except SoapFault, e:
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except KeyError, e:
self.error("Error",u'Campo obligatorio no encontrado: %s' % e)
except Exception, e:
self.error(u'Excepción',unicode(e))
finally:
if DEBUG and self.webservice == 'wsfev1':
print self.ws.XmlRequest
print self.ws.XmlResponse
def on_btnAutorizarLote_click(self, event):
self.verifica_ws()
if not self.items: return
try:
#getcontext().prec = 2
ok = 0
rechazadas = 0
cols = self.cols
items = []
self.progreso(0)
cbt_desde = cbt_hasta = None
datos = {
'tipo_cbte': None,
'punto_vta': None,
'fecha_cbte': None,
'fecha_venc_pago': None,
'fecha_serv_desde': None,
'fecha_serv_hasta': None,
'moneda_id': None,
'moneda_ctz': None,
'id': None,
}
importes = {
'imp_total': Decimal(0),
'imp_tot_conc': Decimal(0),
'imp_neto': Decimal(0),
'imp_iva': Decimal(0),
'imp_op_ex': Decimal(0),
'imp_trib': Decimal(0),
}
for l in range(1,5):
k = 'iva_%%s_%s' % l
datos[k % 'id'] = None
importes[k % 'base_imp'] = Decimal(0)
importes[k % 'importe'] = Decimal(0)
for l in range(1,10):
k = 'tributo_%%s_%s' % l
datos[k % 'id'] = None
datos[k % 'desc'] = None
importes[k % 'base_imp'] = Decimal(0)
datos[k % 'alic'] = None
importes[k % 'importe'] = Decimal(0)
for i, item in self.get_selected_items():
if cbt_desde is None or int(item['cbt_numero']) < cbt_desde:
cbt_desde = int(item['cbt_numero'])
if cbt_hasta is None or int(item['cbt_numero']) > cbt_hasta:
cbt_hasta = int(item['cbt_numero'])
for key in item:
if key in datos:
if datos[key] is None:
datos[key] = item[key]
elif datos[key] != item[key]:
raise RuntimeError(u"%s tiene valores distintos en el lote!" % key)
if key in importes and item[key]:
importes[key] = importes[key] + Decimal("%.2f" % float(str(item[key]).replace(",", ".")))
kargs = {'cbt_desde': cbt_desde, 'cbt_hasta': cbt_hasta}
kargs.update({'tipo_doc': 99, 'nro_doc': '0'})
kargs.update(datos)
kargs.update(importes)
if kargs['fecha_serv_desde'] and kargs['fecha_serv_hasta']:
kargs['presta_serv'] = 1
kargs['concepto'] = 2
else:
kargs['presta_serv'] = 0
kargs['concepto'] = 1
del kargs['fecha_serv_desde']
del kargs['fecha_serv_hasta']
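# Truncate (ROUND_DOWN) the accumulated Decimal totals to two decimal
# places before sending, avoiding binary floating point rounding artifacts: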
for key, val in importes.items():
importes[key] = val.quantize(Decimal('.01'), rounding=ROUND_DOWN)
if 'id' not in kargs or kargs['id'] == "":
id = long(kargs['cbt_desde'])
id += (int(kargs['tipo_cbte'])*10**4 + int(kargs['punto_vta']))*10**8
kargs['id'] = id
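# e.g. tipo_cbte=1, punto_vta=2, cbt_desde=123 gives
# id = (1*10**4 + 2)*10**8 + 123 = 1000200000123, a numeric key that
# embeds voucher type, point of sale and number.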
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in kargs.items()]))
if '--test' in sys.argv:
kargs['cbt_desde'] = 777
kargs['fecha_cbte'] = '20110802'
kargs['fecha_venc_pago'] = '20110831'
if gui.confirm("Confirma Lote:\n"
"Tipo: %(tipo_cbte)s Desde: %(cbt_desde)s Hasta %(cbt_hasta)s\n"
"Neto: %(imp_neto)s IVA: %(imp_iva)s Trib.: %(imp_trib)s Total: %(imp_total)s"
% kargs, "Autorizar lote:"):
if self.webservice == 'wsfev1':
encabezado = {}
for k in ('concepto', 'tipo_doc', 'nro_doc', 'tipo_cbte', 'punto_vta',
'cbt_desde', 'cbt_hasta', 'imp_total', 'imp_tot_conc', 'imp_neto',
'imp_iva', 'imp_trib', 'imp_op_ex', 'fecha_cbte',
'moneda_id', 'moneda_ctz'):
encabezado[k] = kargs[k]
for k in ('fecha_venc_pago', 'fecha_serv_desde', 'fecha_serv_hasta'):
if k in kargs:
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
for l in range(1,1000):
k = 'iva_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
base_imp = kargs[k % 'base_imp']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarIva(id, base_imp, importe)
else:
break
for l in range(1,1000):
k = 'tributo_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
desc = kargs[k % 'desc']
base_imp = kargs[k % 'base_imp']
alic = kargs[k % 'alic']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarTributo(id, desc, base_imp, alic, importe)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.CAESolicitar()
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
gui.alert(self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
gui.alert(self.ws.Obs, u"Observación AFIP")
for i, item in self.get_selected_items():
for key in ('id', 'cae', 'fecha_vto', 'resultado', 'motivo', 'reproceso', 'err_code', 'err_msg'):
item[key] = kargs[key] if kargs[key] is not None else ""
self.items[i] = item
self.log("ID: %s CAE: %s Motivo: %s Reproceso: %s" % (kargs['id'], kargs['cae'], kargs['motivo'],kargs['reproceso']))
if kargs['resultado'] == "R":
rechazadas += 1
elif kargs['resultado'] == "A":
ok += 1
self.items = self.items # refrescar, ver de corregir
self.progreso(len(self.items))
gui.alert('Proceso finalizado OK!\n\nAceptadas: %d\nRechazadas: %d' % (ok, rechazadas), 'Autorización')
self.grabar()
except SoapFault,e:
self.log(self.ws.XmlRequest)
self.log(self.ws.XmlResponse)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(e))
def on_btnPrevisualizar_click(self, event):
try:
j = 0
for i, item in self.get_selected_items():
j += 1
archivo = self.generar_factura(item, mostrar=(j==1))
except Exception, e:
print e
self.error(u'Excepción', unicode(str(e), 'latin1', 'ignore'))
def on_btnEnviar_click(self, event):
try:
ok = no = 0
self.progreso(0)
for i, item in self.get_selected_items():
if not item['cae'] in ("", "NULL"):
archivo = self.generar_factura(item)
if item.get('email'):
self.enviar_mail(item,archivo)
ok += 1
else:
no += 1
self.log("No se envia factura %s por no tener EMAIL" % item['cbt_numero'])
else:
self.log("No se envia factura %s por no tener CAE" % item['cbt_numero'])
no += 1
self.progreso(i)
self.progreso(len(self.items))
gui.alert('Proceso finalizado OK!\n\nEnviados: %d\nNo enviados: %d' % (ok, no), 'Envio de Email')
except Exception, e:
self.error(u'Excepción',unicode(e))
def generar_factura(self, fila, mostrar=False):
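# PDF generation flow: re-nest the flat row (desaplanar), map legacy field
# names, load the template layout (factura.csv by default), add fixed data
# and render the PDF, deriving the output path from the invoice data.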
fepdf = FEPDF()
fact = formato_csv.desaplanar([self.cols] + [[item[k] for k in self.cols] for item in [fila]])[0]
fact['cbte_nro'] = fact['cbt_numero']
fact['items'] = fact['detalles']
for d in fact['datos']:
fepdf.AgregarDato(d['campo'], d['valor'], d['pagina'])
# por compatibilidad, completo campos anteriores
if d['campo'] not in fact and d['valor']:
fact[d['campo']] = d['valor']
fepdf.factura = fact
# convertir importe total en texto (palabras):
moneda_ds = {"PES": "PESOS", "DOL": "DOLAR EEUU"}.get(fact.get("moneda_id", ""), "")
fact["en_letras"] = "SON " + moneda_ds + " " + conv_text(float(fact["imp_total"]))
# cargo el formato CSV por defecto (factura.csv)
fepdf.CargarFormato(conf_fact.get("formato", "factura.csv"))
# establezco formatos (cantidad de decimales) según configuración:
fepdf.FmtCantidad = conf_fact.get("fmt_cantidad", "0.2")
fepdf.FmtPrecio = conf_fact.get("fmt_precio", "0.2")
# datos fijos:
fepdf.CUIT = cuit # CUIT del emisor para código de barras
for k, v in conf_pdf.items():
fepdf.AgregarDato(k, v)
fepdf.CrearPlantilla(papel=conf_fact.get("papel", "legal"),
orientacion=conf_fact.get("orientacion", "portrait"))
fepdf.ProcesarPlantilla(num_copias=int(conf_fact.get("copias", 1)),
lineas_max=int(conf_fact.get("lineas_max", 24)),
qty_pos=conf_fact.get("cant_pos") or 'izq')
salida = conf_fact.get("salida", "")
fact = fepdf.factura
if salida:
pass
elif 'pdf' in fact and fact['pdf']:
salida = fact['pdf']
else:
# genero el nombre de archivo según datos de factura
d = conf_fact.get('directorio', ".")
clave_subdir = conf_fact.get('subdirectorio','fecha_cbte')
if clave_subdir:
d = os.path.join(d, fila[clave_subdir])
if not os.path.isdir(d):
os.mkdir(d)
fs = conf_fact.get('archivo','numero').split(",")
it = fila.copy()
tipo_fact, letra_fact, numero_fact = fact['_fmt_fact']
it['tipo'] = tipo_fact.replace(" ", "_")
it['letra'] = letra_fact
it['numero'] = numero_fact
it['mes'] = fila['fecha_cbte'][4:6]
it['año'] = fila['fecha_cbte'][0:4]
# remover acentos, ñ del nombre de archivo (vía unicode):
fn = u''.join([unicode(it.get(ff,ff)) for ff in fs])
fn = unicodedata.normalize('NFKD', fn).encode('ASCII', 'ignore')
salida = os.path.join(d, "%s.pdf" % fn)
fepdf.GenerarPDF(archivo=salida)
if mostrar:
fepdf.MostrarPDF(archivo=salida,imprimir='--imprimir' in sys.argv)
return salida
def enviar_mail(self, item, archivo):
archivo = self.generar_factura(item)
if item['email']:
msg = MIMEMultipart()
msg['Subject'] = conf_mail['motivo'].replace("NUMERO",str(item['cbt_numero']))
msg['From'] = conf_mail['remitente']
msg['Reply-to'] = msg['From']
msg['To'] = item['email']
msg.preamble = 'Mensaje de multiples partes.\n'
if not 'html' in conf_mail:
part = MIMEText(conf_mail['cuerpo'])
msg.attach(part)
else:
alt = MIMEMultipart('alternative')
msg.attach(alt)
text = MIMEText(conf_mail['cuerpo'])
alt.attach(text)
# attach the HTML alternative so mail clients can pick plain text or HTML
html = MIMEText(conf_mail['html'], 'html')
alt.attach(html)
part = MIMEApplication(open(archivo,"rb").read())
part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(archivo))
msg.attach(part)
try:
self.log("Enviando email: %s a %s" % (msg['Subject'], msg['To']))
if not self.smtp:
self.smtp = SMTP(conf_mail['servidor'], conf_mail.get('puerto', 25))
if conf_mail['usuario'] and conf_mail['clave']:
self.smtp.ehlo()
self.smtp.login(conf_mail['usuario'], conf_mail['clave'])
to = [msg['To']]
bcc = conf_mail.get('bcc', None)
if bcc:
to.append(bcc)
self.smtp.sendmail(msg['From'], to, msg.as_string())
except Exception,e:
self.error(u'Excepción',unicode(e))
if __name__ == '__main__':
if len(sys.argv)>1 and not sys.argv[1].startswith("-"):
CONFIG_FILE = sys.argv[1]
config = SafeConfigParser()
config.read(CONFIG_FILE)
if not len(config.sections()):
if os.path.exists(CONFIG_FILE):
gui.alert(u"Error al cargar archivo de configuración: %s" %
CONFIG_FILE, "PyRece: Imposible Continuar")
else:
gui.alert(u"No se encuentra archivo de configuración: %s" %
CONFIG_FILE, "PyRece: Imposible Continuar")
sys.exit(1)
cert = config.get('WSAA','CERT')
privatekey = config.get('WSAA','PRIVATEKEY')
cuit = config.get('WSFEv1','CUIT')
if config.has_option('WSFEv1','ENTRADA'):
entrada = config.get('WSFEv1','ENTRADA')
else:
entrada = ""
if not os.path.exists(entrada):
entrada = "facturas.csv"
if config.has_option('WSFEv1','SALIDA'):
salida = config.get('WSFEv1','SALIDA')
else:
salida = "resultado.csv"
if config.has_section('FACTURA'):
conf_fact = dict(config.items('FACTURA'))
else:
conf_fact = {}
conf_pdf = dict(config.items('PDF'))
conf_mail = dict(config.items('MAIL'))
if config.has_section('DBF'):
conf_dbf = dict(config.items('DBF'))
else:
conf_dbf = {}
if config.has_option('WSAA','URL') and not HOMO:
wsaa_url = config.get('WSAA','URL')
else:
wsaa_url = wsaa.WSAAURL
if config.has_option('WSFEv1','URL') and not HOMO:
wsfev1_url = config.get('WSFEv1','URL')
else:
wsfev1_url = wsfev1.WSDL
if config.has_option('WSFEXv1','URL') and not HOMO:
wsfexv1_url = config.get('WSFEXv1','URL')
else:
wsfexv1_url = wsfexv1.WSDL
CACERT = config.has_option('WSAA', 'CACERT') and config.get('WSAA', 'CACERT') or None
WRAPPER = config.has_option('WSAA', 'WRAPPER') and config.get('WSAA', 'WRAPPER') or None
DEFAULT_WEBSERVICE = "wsfev1"
if config.has_section('PYRECE'):
DEFAULT_WEBSERVICE = config.get('PYRECE','WEBSERVICE')
if config.has_section('PROXY'):
proxy_dict = dict(("proxy_%s" % k,v) for k,v in config.items('PROXY'))
proxy_dict['proxy_port'] = int(proxy_dict['proxy_port'])
else:
proxy_dict = {}
c = PyRece()
gui.main_loop()
```
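
The batch totals in `on_btnAutorizarLote_click` above are accumulated as `Decimal` and then truncated to two decimal places. Below is a minimal, self-contained sketch of that accumulate-then-truncate pattern; the sample rows are made up for illustration:

```python
# -*- coding: utf-8 -*-
from decimal import Decimal, ROUND_DOWN

# two rows as read from a CSV (amounts use comma as decimal separator):
items = [{'imp_neto': '100,50', 'imp_iva': '21,10'},
         {'imp_neto': '10,00', 'imp_iva': '2,10'}]
importes = {'imp_neto': Decimal(0), 'imp_iva': Decimal(0)}
for item in items:
    for key in importes:
        # normalize the decimal separator and accumulate as Decimal:
        importes[key] += Decimal("%.2f" % float(str(item[key]).replace(",", ".")))
for key, val in importes.items():
    # truncate (not round) to 2 decimal places, mirroring the code above:
    importes[key] = val.quantize(Decimal('.01'), rounding=ROUND_DOWN)
print importes  # imp_neto: 110.50, imp_iva: 23.20 (both Decimal)
```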
#### File: IronWeb/pyafipws/wsctg.py
```python
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright (C) 2010-2014 <NAME>"
__license__ = "LGPL 3.0"
__version__ = "1.14a"
LICENCIA = """
wsctg.py: Interfaz para generar Código de Trazabilidad de Granos AFIP v1.1
Copyright (C) 2014-2015 <NAME> <EMAIL>
http://www.sistemasagiles.com.ar/trac/wiki/CodigoTrazabilidadGranos
Este programa es software libre, se entrega ABSOLUTAMENTE SIN GARANTIA
y es bienvenido a redistribuirlo bajo la licencia GPLv3.
Para información adicional sobre garantía, soporte técnico comercial
e incorporación/distribución en programas propietarios ver PyAfipWs:
http://www.sistemasagiles.com.ar/trac/wiki/PyAfipWs
"""
AYUDA="""
Opciones:
--ayuda: este mensaje
--debug: modo depuración (detalla y confirma las operaciones)
--formato: muestra el formato de los archivos de entrada/salida
--prueba: genera y autoriza una CTG de prueba (no usar en producción!)
--xml: almacena los requerimientos y respuestas XML (depuración)
--dummy: consulta estado de servidores
--solicitar: obtiene el CTG (según archivo de entrada en TXT o CSV)
--confirmar: confirma el CTG (según archivo de entrada en TXT o CSV)
--anular: anula el CTG
--rechazar: permite al destino rechazar el CTG
--confirmar_arribo: confirma el arribo de un CTG
--confirmar_definitivo: confirma el arribo definitivo de un CTG
--regresar_a_origen_rechazado: tomar la acción de "Regresar a Origen"
--cambiar_destino_destinatario_rechazado: "Cambio de Destino y Destinatario"
--consultar: consulta las CTG generadas
--consultar_excel: consulta las CTG generadas (genera un excel)
--consultar_detalle: obtiene el detalle de una CTG
--consultar_constancia_pdf: descarga el documento PDF de una CTG
--pendientes: consulta CTGs otorgados, rechazados, confirmados a resolver
--consultar_rechazados: obtener CTGs rechazados para darles un nuevo curso
--consultar_activos_por_patente: consulta de CTGs activos por patente
--provincias: obtiene el listado de provincias
--localidades: obtiene el listado de localidades por provincia
--especies: obtiene el listado de especies
--cosechas: obtiene el listado de cosechas
Ver wsctg.ini para parámetros de configuración (URL, certificados, etc.)
"""
import os, sys, time, base64
from utils import date
import traceback
from pysimplesoap.client import SoapFault
import utils
# importo funciones compartidas:
from utils import leer, escribir, leer_dbf, guardar_dbf, N, A, I, json, BaseWS, inicializar_y_capturar_excepciones, get_install_dir
# constantes de configuración (homologación):
WSDL = "https://fwshomo.afip.gov.ar/wsctg/services/CTGService_v4.0?wsdl"
DEBUG = False
XML = False
CONFIG_FILE = "wsctg.ini"
HOMO = False
# definición del formato del archivo de intercambio:
ENCABEZADO = [
# datos enviados
('tipo_reg', 1, A), # 0: encabezado
('numero_carta_de_porte', 13, N),
('codigo_especie', 5, N),
('cuit_canjeador', 11, N),
('cuit_destino', 11, N),
('cuit_destinatario', 11, N),
('codigo_localidad_origen', 6, N),
('codigo_localidad_destino', 6, N),
('codigo_cosecha', 4, N),
('peso_neto_carga', 5, N),
('cant_horas', 2, N),
('reservado1', 6, A),
('cuit_transportista', 11, N),
('km_a_recorrer', 4, N), # km_recorridos (en consulta WSCTGv2)
('establecimiento', 6, N), # confirmar arribo
('remitente_comercial_como_canjeador', 1, A), # S/N solicitar CTG inicial (WSCTGv2)
('consumo_propio', 1, A), # S/N confirmar arribo (WSCTGv2)
# datos devueltos
('numero_ctg', 8, N),
('fecha_hora', 19, A),
('vigencia_desde', 10, A),
('vigencia_hasta', 10, A),
('transaccion', 12, N),
('tarifa_referencia', 6, I, 2), # consultar detalle
('estado', 20, A),
('imprime_constancia', 5, A),
('observaciones', 200, A),
('errores', 1000, A),
('controles', 1000, A),
('detalle', 1000, A), # consultar detalle (WSCTGv2)
# nuevos campos agregados:
('cuit_chofer', 11, N),
# nuevos campos agregados WSCTGv3:
('cuit_corredor', 12, N),
('remitente_comercial_como_productor', 1, A),
('patente_vehiculo', 10, A),
# nuevos campos agregados WSCTGv4:
('ctc_codigo', 2, A),
('turno', 20, A),
]
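# Each tuple above is (field_name, length, type[, decimals]); N, A and I are
# the type markers imported from utils (presumably numeric, alphanumeric and
# amount with decimals, respectively) that leer()/escribir() use to pack and
# unpack the fixed-width TXT interchange records.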
class WSCTG(BaseWS):
"Interfaz para el WebService de Código de Trazabilidad de Granos (Version 3)"
_public_methods_ = ['Conectar', 'Dummy', 'SetTicketAcceso', 'DebugLog',
'SolicitarCTGInicial', 'SolicitarCTGDatoPendiente',
'ConfirmarArribo', 'ConfirmarDefinitivo',
'AnularCTG', 'RechazarCTG', 'CTGsPendientesResolucion',
'ConsultarCTG', 'LeerDatosCTG', 'ConsultarDetalleCTG',
'ConsultarCTGExcel', 'ConsultarConstanciaCTGPDF',
'ConsultarCTGRechazados',
'RegresarAOrigenCTGRechazado',
'CambiarDestinoDestinatarioCTGRechazado',
'ConsultarCTGActivosPorPatente',
'ConsultarProvincias',
'ConsultarLocalidadesPorProvincia',
'ConsultarEstablecimientos',
'ConsultarCosechas',
'ConsultarEspecies',
'SetParametros', 'SetParametro', 'GetParametro',
'AnalizarXml', 'ObtenerTagXml', 'LoadTestXML',
]
_public_attrs_ = ['Token', 'Sign', 'Cuit',
'AppServerStatus', 'DbServerStatus', 'AuthServerStatus',
'Excepcion', 'ErrCode', 'ErrMsg', 'LanzarExcepciones', 'Errores',
'XmlRequest', 'XmlResponse', 'Version', 'Traceback',
'NumeroCTG', 'CartaPorte', 'FechaHora', 'CodigoOperacion',
'CodigoTransaccion', 'Observaciones', 'Controles', 'DatosCTG',
'VigenciaHasta', 'VigenciaDesde', 'Estado', 'ImprimeConstancia',
'TarifaReferencia', 'Destino', 'Destinatario', 'Detalle',
'Patente', 'PesoNeto', 'FechaVencimiento',
'UsuarioSolicitante', 'UsuarioReal', 'CtcCodigo', 'Turno',
]
_reg_progid_ = "WSCTG"
_reg_clsid_ = "{4383E947-57C4-47C5-8419-85221580CB48}"
# Variables globales para BaseWS:
HOMO = HOMO
WSDL = WSDL
LanzarExcepciones = False
Version = "%s %s" % (__version__, HOMO and 'Homologación' or '')
def Conectar(self, *args, **kwargs):
ret = BaseWS.Conectar(self, *args, **kwargs)
# corregir descripción de servicio WSDL publicado por AFIP
# kmARecorrer -> kmRecorridos (ConsultarDetalleCTG)
port = self.client.services['CTGService_v4.0']['ports']['CTGServiceHttpSoap20Endpoint']
msg = port['operations']['consultarDetalleCTG']['output']['consultarDetalleCTGResponse']
msg['response']['consultarDetalleCTGDatos']['kmRecorridos'] = int
return ret
def inicializar(self):
self.AppServerStatus = self.DbServerStatus = self.AuthServerStatus = None
self.CodError = self.DescError = ''
self.NumeroCTG = self.CartaPorte = ""
self.CodigoTransaccion = self.Observaciones = ''
self.FechaHora = self.CodigoOperacion = ""
self.VigenciaDesde = self.VigenciaHasta = ""
self.Controles = []
self.DatosCTG = self.TarifaReferencia = None
self.CodigoTransaccion = self.Observaciones = ''
self.Detalle = self.Destino = self.Destinatario = ''
self.Patente = self.PesoNeto = self.FechaVencimiento = ''
self.UsuarioSolicitante = self.UsuarioReal = ''
self.CtcCodigo = self.Turno = ""
def __analizar_errores(self, ret):
"Comprueba y extrae errores si existen en la respuesta XML"
if 'arrayErrores' in ret:
errores = ret['arrayErrores'] or []
self.Errores = [err['error'] for err in errores]
self.ErrCode = ' '.join(self.Errores)
self.ErrMsg = '\n'.join(self.Errores)
def __analizar_controles(self, ret):
"Comprueba y extrae controles si existen en la respuesta XML"
if 'arrayControles' in ret:
controles = ret['arrayControles']
self.Controles = ["%(tipo)s: %(descripcion)s" % ctl['control']
for ctl in controles]
@inicializar_y_capturar_excepciones
def Dummy(self):
"Obtener el estado de los servidores de la AFIP"
results = self.client.dummy()['response']
self.AppServerStatus = str(results['appserver'])
self.DbServerStatus = str(results['dbserver'])
self.AuthServerStatus = str(results['authserver'])
@inicializar_y_capturar_excepciones
def AnularCTG(self, carta_porte, ctg):
"Anular el CTG si se creó el mismo por error"
response = self.client.anularCTG(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosAnularCTG={
'cartaPorte': carta_porte,
'ctg': ctg, }))['response']
datos = response.get('datosResponse')
self.__analizar_errores(response)
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['CTG'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoOperacion = str(datos['codigoOperacion'])
@inicializar_y_capturar_excepciones
def RechazarCTG(self, carta_porte, ctg, motivo):
"El Destino puede rechazar el CTG a través de la siguiente operatoria"
response = self.client.rechazarCTG(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosRechazarCTG={
'cartaPorte': carta_porte,
'ctg': ctg, 'motivoRechazo': motivo,
}))['response']
datos = response.get('datosResponse')
self.__analizar_errores(response)
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['CTG'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoOperacion = str(datos['codigoOperacion'])
@inicializar_y_capturar_excepciones
def SolicitarCTGInicial(self, numero_carta_de_porte, codigo_especie,
cuit_canjeador, cuit_destino, cuit_destinatario, codigo_localidad_origen,
codigo_localidad_destino, codigo_cosecha, peso_neto_carga,
cant_horas=None, patente_vehiculo=None, cuit_transportista=None,
km_a_recorrer=None, remitente_comercial_como_canjeador=None,
cuit_corredor=None, remitente_comercial_como_productor=None,
turno=None,
**kwargs):
"Solicitar CTG Desde el Inicio"
# ajusto parámetros según validaciones de AFIP:
if cuit_canjeador and int(cuit_canjeador) == 0:
cuit_canjeador = None # nulo
if not remitente_comercial_como_canjeador:
remitente_comercial_como_canjeador = None
if not remitente_comercial_como_productor:
remitente_comercial_como_productor = None
ret = self.client.solicitarCTGInicial(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosSolicitarCTGInicial=dict(
cartaPorte=numero_carta_de_porte,
codigoEspecie=codigo_especie,
cuitCanjeador=cuit_canjeador or None,
remitenteComercialComoCanjeador=remitente_comercial_como_canjeador,
cuitDestino=cuit_destino,
cuitDestinatario=cuit_destinatario,
codigoLocalidadOrigen=codigo_localidad_origen,
codigoLocalidadDestino=codigo_localidad_destino,
codigoCosecha=codigo_cosecha,
pesoNeto=peso_neto_carga,
cuitTransportista=cuit_transportista,
cantHoras=cant_horas,
patente=patente_vehiculo,
kmARecorrer=km_a_recorrer,
cuitCorredor=cuit_corredor,
remitenteComercialcomoProductor=remitente_comercial_como_productor,
turno=turno,
)))['response']
self.__analizar_errores(ret)
self.Observaciones = ret['observacion']
datos = ret.get('datosSolicitarCTGResponse')
if datos:
self.CartaPorte = str(datos['cartaPorte'])
datos_ctg = datos.get('datosSolicitarCTG')
if datos_ctg:
self.NumeroCTG = str(datos_ctg['ctg'])
self.FechaHora = str(datos_ctg['fechaEmision'])
self.VigenciaDesde = str(datos_ctg['fechaVigenciaDesde'])
self.VigenciaHasta = str(datos_ctg['fechaVigenciaHasta'])
self.TarifaReferencia = str(datos_ctg.get('tarifaReferencia'))
self.__analizar_controles(datos)
return self.NumeroCTG or 0
@inicializar_y_capturar_excepciones
def SolicitarCTGDatoPendiente(self, numero_carta_de_porte, cant_horas,
patente_vehiculo, cuit_transportista, patente=None, turno=None):
"Solicitud que permite completar los datos faltantes de un Pre-CTG "
"generado anteriormente a través de la operación solicitarCTGInicial"
ret = self.client.solicitarCTGDatoPendiente(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosSolicitarCTGDatoPendiente=dict(
cartaPorte=numero_carta_de_porte,
cuitTransportista=cuit_transportista,
cantHoras=cant_horas,
patente=patente,
turno=turno,
)))['response']
self.__analizar_errores(ret)
self.Observaciones = ret['observacion']
datos = ret.get('datosSolicitarCTGResponse')
if datos:
self.CartaPorte = str(datos['cartaPorte'])
datos_ctg = datos.get('datosSolicitarCTG')
if datos_ctg:
self.NumeroCTG = str(datos_ctg['ctg'])
self.FechaHora = str(datos_ctg['fechaEmision'])
self.VigenciaDesde = str(datos_ctg['fechaVigenciaDesde'])
self.VigenciaHasta = str(datos_ctg['fechaVigenciaHasta'])
self.TarifaReferencia = str(datos_ctg.get('tarifaReferencia'))
self.__analizar_controles(datos)
return self.NumeroCTG
@inicializar_y_capturar_excepciones
def ConfirmarArribo(self, numero_carta_de_porte, numero_ctg,
cuit_transportista, peso_neto_carga,
consumo_propio, establecimiento=None, cuit_chofer=None,
**kwargs):
"Confirma arribo CTG"
ret = self.client.confirmarArribo(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosConfirmarArribo=dict(
cartaPorte=numero_carta_de_porte,
ctg=numero_ctg,
cuitTransportista=cuit_transportista,
cuitChofer=cuit_chofer,
cantKilosCartaPorte=peso_neto_carga,
consumoPropio=consumo_propio,
establecimiento=establecimiento,
)))['response']
self.__analizar_errores(ret)
datos = ret.get('datosResponse')
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['ctg'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoTransaccion = str(datos['codigoOperacion'])
self.Observaciones = ""
return self.CodigoTransaccion
@inicializar_y_capturar_excepciones
def ConfirmarDefinitivo(self, numero_carta_de_porte, numero_ctg,
establecimiento=None, codigo_cosecha=None, peso_neto_carga=None,
**kwargs):
"Confirma arribo definitivo CTG"
ret = self.client.confirmarDefinitivo(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosConfirmarDefinitivo=dict(
cartaPorte=numero_carta_de_porte,
ctg=numero_ctg,
establecimiento=establecimiento,
codigoCosecha=codigo_cosecha,
pesoNeto=peso_neto_carga,
)))['response']
self.__analizar_errores(ret)
datos = ret.get('datosResponse')
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['ctg'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoTransaccion = str(datos.get('codigoOperacion', ""))
self.Observaciones = ""
return self.CodigoTransaccion
@inicializar_y_capturar_excepciones
def RegresarAOrigenCTGRechazado(self, numero_carta_de_porte, numero_ctg,
km_a_recorrer=None,
**kwargs):
"Al consultar los CTGs rechazados se puede Regresar a Origen"
ret = self.client.regresarAOrigenCTGRechazado(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosRegresarAOrigenCTGRechazado=dict(
cartaPorte=numero_carta_de_porte,
ctg=numero_ctg, kmARecorrer=km_a_recorrer,
)))['response']
self.__analizar_errores(ret)
datos = ret.get('datosResponse')
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['ctg'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoTransaccion = str(datos['codigoOperacion'])
self.Observaciones = ""
return self.CodigoTransaccion
@inicializar_y_capturar_excepciones
def CambiarDestinoDestinatarioCTGRechazado(self, numero_carta_de_porte,
numero_ctg, codigo_localidad_destino=None,
cuit_destino=None, cuit_destinatario=None,
km_a_recorrer=None, turno=None,
**kwargs):
"Tomar acción de Cambio de Destino y Destinatario para CTG rechazado"
ret = self.client.cambiarDestinoDestinatarioCTGRechazado(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
datosCambiarDestinoDestinatarioCTGRechazado=dict(
cartaPorte=numero_carta_de_porte,
ctg=numero_ctg,
codigoLocalidadDestino=codigo_localidad_destino,
cuitDestino=cuit_destino,
cuitDestinatario=cuit_destinatario,
kmARecorrer=km_a_recorrer,
turno=turno,
)))['response']
self.__analizar_errores(ret)
datos = ret.get('datosResponse')
if datos:
self.CartaPorte = str(datos['cartaPorte'])
self.NumeroCTG = str(datos['ctg'])
self.FechaHora = str(datos['fechaHora'])
self.CodigoTransaccion = str(datos['codigoOperacion'])
self.Observaciones = ""
return self.CodigoTransaccion
@inicializar_y_capturar_excepciones
def ConsultarCTG(self, numero_carta_de_porte=None, numero_ctg=None,
patente=None, cuit_solicitante=None, cuit_destino=None,
fecha_emision_desde=None, fecha_emision_hasta=None):
"Operación que realiza consulta de CTGs según el criterio ingresado."
ret = self.client.consultarCTG(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
consultarCTGDatos=dict(
cartaPorte=numero_carta_de_porte,
ctg=numero_ctg,
patente=patente,
cuitSolicitante=cuit_solicitante,
cuitDestino=cuit_destino,
fechaEmisionDesde=fecha_emision_desde,
fechaEmisionHasta=fecha_emision_hasta,
)))['response']
self.__analizar_errores(ret)
datos = ret.get('arrayDatosConsultarCTG')
if datos:
self.DatosCTG = datos
self.LeerDatosCTG(pop=False)
return True
else:
self.DatosCTG = []
return ''
@inicializar_y_capturar_excepciones
def ConsultarCTGRechazados(self):
"Consulta de CTGs Otorgados, CTGs Rechazados y CTGs Confirmados"
ret = self.client.consultarCTGRechazados(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
))['response']
self.__analizar_errores(ret)
datos = ret.get('arrayConsultarCTGRechazados')
if datos:
self.DatosCTG = datos
self.LeerDatosCTG(pop=False)
return True
else:
self.DatosCTG = []
return False
@inicializar_y_capturar_excepciones
def ConsultarCTGActivosPorPatente(self, patente="ZZZ999"):
"Consulta de CTGs activos por patente"
ret = self.client.consultarCTGActivosPorPatente(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
patente=patente,
))['response']
self.__analizar_errores(ret)
datos = ret.get('arrayConsultarCTGActivosPorPatenteResponse')
if datos:
self.DatosCTG = datos
self.LeerDatosCTG(pop=False)
return True
else:
self.DatosCTG = []
return False
@inicializar_y_capturar_excepciones
def CTGsPendientesResolucion(self):
"Consulta de CTGs Otorgados, CTGs Rechazados y CTGs Confirmados"
ret = self.client.CTGsPendientesResolucion(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
))['response']
self.__analizar_errores(ret)
if ret:
self.DatosCTG = ret
return True
else:
self.DatosCTG = {}
return False
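# LeerDatosCTG (below) is meant to be called repeatedly: with pop=True it
# consumes one element of DatosCTG per call and returns "" once the list is
# exhausted, so callers can iterate with `while wsctg.LeerDatosCTG():`.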
def LeerDatosCTG(self, clave='', pop=True):
"Recorro los datos devueltos y devuelvo el primero si existe"
if clave and self.DatosCTG:
# obtengo la lista por estado pendiente de resolución ("array")
datos = self.DatosCTG[clave]
else:
# uso directamente la lista devuelta por la consulta
datos = self.DatosCTG
if datos:
# extraigo el primer item
if pop:
datos = datos.pop(0)
else:
datos = datos[0]
for det in ('datosConsultarCTG', 'detalleConsultaCTGRechazado',
'detalleConsultaCTGActivo'):
if det in datos:
datos_ctg = datos[det]
break
else:
# elemento del array no encontrado:
return ""
self.CartaPorte = str(datos_ctg['cartaPorte'])
self.NumeroCTG = str(datos_ctg['ctg'])
self.Estado = unicode(datos_ctg.get('estado', ""))
self.ImprimeConstancia = str(datos_ctg.get('imprimeConstancia', ""))
for campo in ("fechaRechazo", "fechaEmision", "fechaSolicitud",
"fechaConfirmacionArribo"):
if campo in datos_ctg:
self.FechaHora = str(datos_ctg.get(campo))
self.Destino = datos_ctg.get("destino", "")
self.Destinatario = datos_ctg.get("destinatario", "")
self.Observaciones = datos_ctg.get("observaciones", "")
self.Patente = datos_ctg.get("patente")
self.PesoNeto = datos_ctg.get("pesoNeto")
self.FechaVencimiento = datos_ctg.get("fechaVencimiento")
self.UsuarioSolicitante = datos_ctg.get("usuarioSolicitante")
self.UsuarioReal = datos_ctg.get("usuarioReal")
self.CtcCodigo = datos_ctg.get("ctcCodigo")
self.Turno = datos_ctg.get("turno")
return self.NumeroCTG
else:
return ""
@inicializar_y_capturar_excepciones
def ConsultarCTGExcel(self, numero_carta_de_porte=None, numero_ctg=None,
patente=None, cuit_solicitante=None, cuit_destino=None,
fecha_emision_desde=None, fecha_emision_hasta=None,
archivo="planilla.xls"):
"Operación que realiza consulta de CTGs, graba una planilla xls"
ret = self.client.consultarCTGExcel(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
consultarCTGDatos=dict(
cartaPorte=numero_carta_de_porte,
ctg=numero_ctg,
patente=patente,
cuitSolicitante=cuit_solicitante,
cuitDestino=cuit_destino,
fechaEmisionDesde=fecha_emision_desde,
fechaEmisionHasta=fecha_emision_hasta,
)))['response']
self.__analizar_errores(ret)
datos = base64.b64decode(ret.get('archivo') or "")
f = open(archivo, "wb")
f.write(datos)
f.close()
return True
@inicializar_y_capturar_excepciones
def ConsultarDetalleCTG(self, numero_ctg=None):
"Operación mostrar este detalle de la solicitud de CTG seleccionada."
ret = self.client.consultarDetalleCTG(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
ctg=numero_ctg,
))['response']
self.__analizar_errores(ret)
datos = ret.get('consultarDetalleCTGDatos')
if datos:
self.NumeroCTG = str(datos['ctg'])
self.CartaPorte = str(datos['cartaPorte'])
self.Estado = unicode(datos['estado'])
self.FechaHora = str(datos['fechaEmision'])
self.VigenciaDesde = str(datos['fechaVigenciaDesde'])
self.VigenciaHasta = str(datos['fechaVigenciaHasta'])
self.TarifaReferencia = str(datos['tarifaReferencia'])
self.Detalle = str(datos.get('detalle', ""))
return True
@inicializar_y_capturar_excepciones
def ConsultarConstanciaCTGPDF(self, numero_ctg=None,
archivo="constancia.pdf"):
"Operación Consultar Constancia de CTG en PDF"
ret = self.client.consultarConstanciaCTGPDF(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
ctg=numero_ctg,
))['response']
self.__analizar_errores(ret)
datos = base64.b64decode(ret.get('archivo', ""))
f = open(archivo, "wb")
f.write(datos)
f.close()
return True
@inicializar_y_capturar_excepciones
def ConsultarProvincias(self, sep="||"):
ret = self.client.consultarProvincias(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
))['consultarProvinciasResponse']
self.__analizar_errores(ret)
array = ret.get('arrayProvincias', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['provincia']['codigo'],
it['provincia']['descripcion'])
for it in array]
@inicializar_y_capturar_excepciones
def ConsultarLocalidadesPorProvincia(self, codigo_provincia, sep="||"):
ret = self.client.consultarLocalidadesPorProvincia(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
codigoProvincia=codigo_provincia,
))['response']
self.__analizar_errores(ret)
array = ret.get('arrayLocalidades', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['localidad']['codigo'],
it['localidad']['descripcion'])
for it in array]
@inicializar_y_capturar_excepciones
def ConsultarEstablecimientos(self, sep="||"):
ret = self.client.consultarEstablecimientos(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
))['response']
self.__analizar_errores(ret)
array = ret.get('arrayEstablecimientos', [])
return [("%s" %
(it['establecimiento'],))
for it in array]
@inicializar_y_capturar_excepciones
def ConsultarEspecies(self, sep="||"):
ret = self.client.consultarEspecies(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
))['response']
self.__analizar_errores(ret)
array = ret.get('arrayEspecies', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['especie']['codigo'],
it['especie']['descripcion'])
for it in array]
@inicializar_y_capturar_excepciones
def ConsultarCosechas(self, sep="||"):
ret = self.client.consultarCosechas(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
))['response']
self.__analizar_errores(ret)
array = ret.get('arrayCosechas', [])
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['cosecha']['codigo'],
it['cosecha']['descripcion'])
for it in array]
def leer_archivo(nombre_archivo):
archivo = open(nombre_archivo, "r")
items = []
ext = os.path.splitext(nombre_archivo)[1]
if ext == '.csv':
csv_reader = csv.reader(archivo, dialect='excel', delimiter=";")
for row in csv_reader:
items.append(row)
cols = [str(it).strip() for it in items[0]]
# armar diccionario por cada linea
items = [dict([(cols[i],str(v).strip()) for i,v in enumerate(item)]) for item in items[1:]]
return cols, items
elif ext == '.json':
items = json.load(archivo)
elif ext == '.dbf':
dic = {}
formatos = [('Encabezado', ENCABEZADO, dic), ]
leer_dbf(formatos, conf_dbf)
items = [dic]
elif ext == '.txt':
dic = {}
for linea in archivo:
if str(linea[0])=='0':
dic.update(leer(linea, ENCABEZADO))
else:
print "Tipo de registro incorrecto:", linea[0]
items.append(dic)
else:
raise RuntimeError("Extension de archivo desconocida: %s" % ext)
archivo.close()
cols = [k[0] for k in ENCABEZADO]
return cols, items
def escribir_archivo(cols, items, nombre_archivo, agrega=False):
archivo = open(nombre_archivo, agrega and "a" or "w")
ext = os.path.splitext(nombre_archivo)[1]
if ext == '.csv':
csv_writer = csv.writer(archivo, dialect='excel', delimiter=";")
csv_writer.writerows([cols])
csv_writer.writerows([[item[k] for k in cols] for item in items])
elif ext == '.json':
json.dump(items, archivo, sort_keys=True, indent=4)
elif ext == '.dbf':
formatos = [('Encabezado', ENCABEZADO, items), ]
guardar_dbf(formatos, True, conf_dbf)
elif ext == '.txt':
for dic in items:
dic['tipo_reg'] = 0
archivo.write(escribir(dic, ENCABEZADO))
else:
raise RuntimeError("Extension de archivo desconocida: %s" % ext)
archivo.close()
class WSCTGv2(BaseWS):
_reg_progid_ = "WSCTGv2"
_reg_clsid_ = "{ACDEFB8A-34E1-48CF-94E8-6AF6ADA0717A}"
# busco el directorio de instalación (global para que no cambie si usan otra dll)
if not hasattr(sys, "frozen"):
basepath = __file__
elif sys.frozen=='dll':
import win32api
basepath = win32api.GetModuleFileName(sys.frozendllhandle)
else:
basepath = sys.executable
INSTALL_DIR = WSCTG.InstallDir = WSCTGv2.InstallDir = get_install_dir()
if __name__ == '__main__':
if '--ayuda' in sys.argv:
print LICENCIA
print AYUDA
sys.exit(0)
if '--formato' in sys.argv:
print "Formato:"
for msg, formato in [('Encabezado', ENCABEZADO), ]:
comienzo = 1
print "=== %s ===" % msg
for fmt in formato:
clave, longitud, tipo = fmt[0:3]
dec = len(fmt)>3 and fmt[3] or (tipo=='I' and '2' or '')
print " * Campo: %-20s Posición: %3d Longitud: %4d Tipo: %s Decimales: %s" % (
clave, comienzo, longitud, tipo, dec)
comienzo += longitud
sys.exit(0)
if "--register" in sys.argv or "--unregister" in sys.argv:
import win32com.server.register
win32com.server.register.UseCommandLine(WSCTG)
# Compatibilidad hacia atrás:
win32com.server.register.UseCommandLine(WSCTGv2)
sys.exit(0)
import csv
from ConfigParser import SafeConfigParser
try:
if "--version" in sys.argv:
print "Versión: ", __version__
for arg in sys.argv[1:]:
if arg.startswith("--"):
break
print "Usando configuración:", arg
CONFIG_FILE = arg
config = SafeConfigParser()
config.read(CONFIG_FILE)
CERT = config.get('WSAA','CERT')
PRIVATEKEY = config.get('WSAA','PRIVATEKEY')
CUIT = config.get('WSCTG','CUIT')
ENTRADA = config.get('WSCTG','ENTRADA')
SALIDA = config.get('WSCTG','SALIDA')
if config.has_option('WSAA','URL') and not HOMO:
wsaa_url = config.get('WSAA','URL')
else:
wsaa_url = None
if config.has_option('WSCTG','URL') and not HOMO:
wsctg_url = config.get('WSCTG','URL')
else:
wsctg_url = WSDL
if config.has_section('DBF'):
conf_dbf = dict(config.items('DBF'))
if DEBUG: print "conf_dbf", conf_dbf
else:
conf_dbf = {}
DEBUG = '--debug' in sys.argv
XML = '--xml' in sys.argv
if DEBUG:
print "Usando Configuración:"
print "wsaa_url:", wsaa_url
print "wsctg_url:", wsctg_url
# obteniendo el TA
from wsaa import WSAA
wsaa = WSAA()
ta = wsaa.Autenticar("wsctg", CERT, PRIVATEKEY, wsaa_url, debug=DEBUG)
if not ta:
sys.exit("Imposible autenticar con WSAA: %s" % wsaa.Excepcion)
# cliente soap del web service
wsctg = WSCTG()
wsctg.Conectar(wsdl=wsctg_url)
wsctg.SetTicketAcceso(ta)
wsctg.Cuit = CUIT
if '--dummy' in sys.argv:
ret = wsctg.Dummy()
print "AppServerStatus", wsctg.AppServerStatus
print "DbServerStatus", wsctg.DbServerStatus
print "AuthServerStatus", wsctg.AuthServerStatus
sys.exit(0)
if '--anular' in sys.argv:
i = sys.argv.index("--anular")
##print wsctg.client.help("anularCTG")
if i + 2 >= len(sys.argv) or sys.argv[i + 1].startswith("--"):
carta_porte = raw_input("Ingrese Carta de Porte: ")
ctg = raw_input("Ingrese CTG: ")
else:
carta_porte = sys.argv[i + 1]
ctg = sys.argv[i + 2]
ret = wsctg.AnularCTG(carta_porte, ctg)
print "Carta Porte", wsctg.CartaPorte
print "Numero CTG", wsctg.NumeroCTG
print "Fecha y Hora", wsctg.FechaHora
print "Codigo Anulacion de CTG", wsctg.CodigoOperacion
print "Errores:", wsctg.Errores
sys.exit(0)
if '--rechazar' in sys.argv:
i = sys.argv.index("--rechazar")
##print wsctg.client.help("rechazarCTG")
if i + 3 >= len(sys.argv) or sys.argv[i + 1].startswith("--"):
carta_porte = raw_input("Ingrese Carta de Porte: ")
ctg = raw_input("Ingrese CTG: ")
motivo = raw_input("Motivo: ")
else:
carta_porte = sys.argv[i + 1]
ctg = sys.argv[i + 2]
motivo = sys.argv[i + 3]
ret = wsctg.RechazarCTG(carta_porte, ctg, motivo)
print "Carta Porte", wsctg.CartaPorte
print "Numero CTG", wsctg.NumeroCTG
print "Fecha y Hora", wsctg.FechaHora
print "Codigo Anulacion de CTG", wsctg.CodigoOperacion
print "Errores:", wsctg.Errores
sys.exit(0)
# Recuperar parámetros:
if '--provincias' in sys.argv:
ret = wsctg.ConsultarProvincias()
print "\n".join(ret)
if '--localidades' in sys.argv:
ret = wsctg.ConsultarLocalidadesPorProvincia(16)
print "\n".join(ret)
if '--especies' in sys.argv:
ret = wsctg.ConsultarEspecies()
print "\n".join(ret)
if '--cosechas' in sys.argv:
ret = wsctg.ConsultarCosechas()
print "\n".join(ret)
if '--establecimientos' in sys.argv:
ret = wsctg.ConsultarEstablecimientos()
print "\n".join(ret)
if '--prueba' in sys.argv or '--formato' in sys.argv:
prueba = dict(numero_carta_de_porte=512345679, codigo_especie=23,
cuit_canjeador=0, #30660685908,
cuit_destino=20111111112, cuit_destinatario=20222222223,
codigo_localidad_origen=3058, codigo_localidad_destino=3059,
codigo_cosecha='1314', peso_neto_carga=1000,
km_a_recorrer=1234,
observaciones='', establecimiento=1,
)
if [argv for argv in sys.argv if argv.startswith(("--confirmar",
"--regresar", '--cambiar'))]:
prueba.update(dict(
numero_ctg="49241727", transaccion='10000001681',
consumo_propio='S',
))
parcial = dict(
cant_horas=1,
patente_vehiculo='APE652', cuit_transportista=20333333334,
)
if not '--parcial' in sys.argv:
prueba.update(parcial)
escribir_archivo(prueba.keys(), [prueba], ENTRADA)
cols, items = leer_archivo(ENTRADA)
ctg = None
if '--solicitar' in sys.argv:
wsctg.LanzarExcepciones = True
for it in items:
print "solicitando...", ' '.join(['%s=%s' % (k,v) for k,v in it.items()])
ctg = wsctg.SolicitarCTGInicial(**it)
print "numero CTG: ", ctg
print "Observiacion: ", wsctg.Observaciones
print "Carta Porte", wsctg.CartaPorte
print "Numero CTG", wsctg.NumeroCTG
print "Fecha y Hora", wsctg.FechaHora
print "Vigencia Desde", wsctg.VigenciaDesde
print "Vigencia Hasta", wsctg.VigenciaHasta
print "Tarifa Referencia: ", wsctg.TarifaReferencia
print "Errores:", wsctg.Errores
print "Controles:", wsctg.Controles
it['numero_ctg'] = wsctg.NumeroCTG
it['tarifa_referencia'] = wsctg.TarifaReferencia
it['observaciones'] = wsctg.Observaciones
it['fecha_hora'] = wsctg.FechaHora
it['vigencia_desde'] = wsctg.VigenciaDesde
it['vigencia_hasta'] = wsctg.VigenciaHasta
it['errores'] = '|'.join(wsctg.Errores)
it['controles'] = '|'.join(wsctg.Controles)
if '--parcial' in sys.argv:
wsctg.LanzarExcepciones = True
for it in items:
print "solicitando dato pendiente...", ' '.join(['%s=%s' % (k,v) for k,v in parcial.items()])
ctg = wsctg.SolicitarCTGDatoPendiente(
numero_carta_de_porte=wsctg.CartaPorte,
**parcial)
print "numero CTG: ", ctg
print "Observiacion: ", wsctg.Observaciones
print "Carta Porte", wsctg.CartaPorte
print "Numero CTG", wsctg.NumeroCTG
print "Fecha y Hora", wsctg.FechaHora
print "Vigencia Desde", wsctg.VigenciaDesde
print "Vigencia Hasta", wsctg.VigenciaHasta
print "Tarifa Referencia: ", wsctg.TarifaReferencia
print "Errores:", wsctg.Errores
print "Controles:", wsctg.Controles
it['numero_ctg'] = wsctg.NumeroCTG
it['tarifa_referencia'] = wsctg.TarifaReferencia
it['errores'] = '|'.join(wsctg.Errores)
it['controles'] = '|'.join(wsctg.Controles)
if '--confirmar_arribo' in sys.argv:
for it in items:
print "confirmando...", ' '.join(['%s=%s' % (k,v) for k,v in it.items()])
transaccion = wsctg.ConfirmarArribo(**it)
print "transaccion: %s" % (transaccion, )
print "Fecha y Hora", wsctg.FechaHora
print "Errores:", wsctg.Errores
it['transaccion'] = transaccion
it['errores'] = '|'.join(wsctg.Errores)
it['controles'] = '|'.join(wsctg.Controles)
if '--confirmar_definitivo' in sys.argv:
if '--testing' in sys.argv:
wsctg.LoadTestXML("wsctg_confirmar_def.xml") # cargo respuesta
for it in items:
print "confirmando...", ' '.join(['%s=%s' % (k,v) for k,v in it.items()])
transaccion = wsctg.ConfirmarDefinitivo(**it)
print "transaccion: %s" % (transaccion, )
print "Fecha y Hora", wsctg.FechaHora
print "Errores:", wsctg.Errores
it['transaccion'] = transaccion
it['errores'] = '|'.join(wsctg.Errores)
it['controles'] = '|'.join(wsctg.Controles)
if '--regresar_a_origen_rechazado' in sys.argv:
for it in items:
print "regresando...", ' '.join(['%s=%s' % (k,v) for k,v in it.items()])
transaccion = wsctg.RegresarAOrigenCTGRechazado(**it)
print "transaccion: %s" % (transaccion, )
print "Fecha y Hora", wsctg.FechaHora
print "Errores:", wsctg.Errores
it['transaccion'] = transaccion
it['errores'] = '|'.join(wsctg.Errores)
it['controles'] = '|'.join(wsctg.Controles)
if '--cambiar_destino_destinatario_rechazado' in sys.argv:
for it in items:
print "cambiando...", ' '.join(['%s=%s' % (k,v) for k,v in it.items()])
transaccion = wsctg.CambiarDestinoDestinatarioCTGRechazado(**it)
print "transaccion: %s" % (transaccion, )
print "Fecha y Hora", wsctg.FechaHora
print "Errores:", wsctg.Errores
it['transaccion'] = transaccion
it['errores'] = '|'.join(wsctg.Errores)
it['controles'] = '|'.join(wsctg.Controles)
if '--consultar_detalle' in sys.argv:
i = sys.argv.index("--consultar_detalle")
if len(sys.argv) > i + 1 and not sys.argv[i+1].startswith("--"):
ctg = int(sys.argv[i+1])
elif not ctg:
ctg = int(raw_input("Numero de CTG: ") or '0') or 73714620
wsctg.LanzarExcepciones = True
for i, it in enumerate(items):
print "consultando detalle...", ctg
ok = wsctg.ConsultarDetalleCTG(ctg)
print "Numero CTG: ", wsctg.NumeroCTG
print "Tarifa Referencia: ", wsctg.TarifaReferencia
print "Observiacion: ", wsctg.Observaciones
print "Carta Porte", wsctg.CartaPorte
print "Numero CTG", wsctg.NumeroCTG
print "Fecha y Hora", wsctg.FechaHora
print "Vigencia Desde", wsctg.VigenciaDesde
print "Vigencia Hasta", wsctg.VigenciaHasta
print "Errores:", wsctg.Errores
print "Controles:", wsctg.Controles
print "Detalle:", wsctg.Detalle
it['numero_ctg'] = wsctg.NumeroCTG
it['observaciones'] = wsctg.Observaciones
it['fecha_hora'] = wsctg.FechaHora
it['vigencia_desde'] = wsctg.VigenciaDesde
it['vigencia_hasta'] = wsctg.VigenciaHasta
wsctg.AnalizarXml("XmlResponse")
for k, ki in {'ctg': 'numero_ctg', 'solicitante': '',
'estado': 'estado',
'especie': '', ##'codigo_especie', no devuelve codigo!
'cosecha': '', ##'codigo_cosecha', no devuelve codigo!
'cuitCanjeador': 'cuit_canjeador',
'cuitDestino': 'cuit_destino',
'cuitDestinatario': 'cuit_destinatario',
'cuitTransportista': 'cuit_transportista',
'establecimiento': 'establecimiento',
'localidadOrigen': 'localidad_origen',
'localidadDestino': 'localidad_destino',
'cantidadHoras': 'cantidad_horas',
'patenteVehiculo': 'patente_vehiculo',
'pesoNetoCarga': 'peso_neto_carga',
'kmRecorridos': 'km_recorridos',
'tarifaReferencia': 'tarifa_referencia',
'ctcCodigo': 'ctc_codigo',
'turno': 'turno',
}.items():
v = wsctg.ObtenerTagXml('consultarDetalleCTGDatos', k)
print k, v
if ki.startswith("cuit") and v:
v = v[:11]
if ki:
it[ki] = v
escribir_archivo(cols, items, SALIDA)
if "--consultar" in sys.argv:
wsctg.LanzarExcepciones = True
wsctg.ConsultarCTG(fecha_emision_desde="01/04/2012")
print "Numero CTG - Carta de Porte - Imprime Constancia - Estado"
while wsctg.LeerDatosCTG():
print wsctg.NumeroCTG, wsctg.CartaPorte,
print wsctg.ImprimeConstancia, wsctg.Estado, wsctg.FechaHora
if "--consultar_rechazados" in sys.argv:
wsctg.LanzarExcepciones = True
wsctg.ConsultarCTGRechazados()
print "Numero CTG - Carta de Porte - Fecha - Destino/Dest./Obs."
while wsctg.LeerDatosCTG():
print wsctg.NumeroCTG, wsctg.CartaPorte, wsctg.FechaHora,
print wsctg.Destino, wsctg.Destinatario, wsctg.Observaciones
if "--consultar_activos_por_patente" in sys.argv:
i = sys.argv.index("--consultar_activos_por_patente")
if len(sys.argv) > i + 1 and not sys.argv[i+1].startswith("--"):
patente = sys.argv[i+1]
else:
patente = raw_input("Patente: ") or 'APE652'
wsctg.LanzarExcepciones = True
if '--testing' in sys.argv:
wsctg.LoadTestXML("wsctgv2_activos.xml")
wsctg.ConsultarCTGActivosPorPatente(patente=patente)
print "Numero CTG - Carta de Porte - Fecha - Peso Neto - Usuario"
while wsctg.LeerDatosCTG():
print wsctg.NumeroCTG, wsctg.CartaPorte, wsctg.Patente,
print wsctg.FechaHora, wsctg.FechaVencimiento, wsctg.PesoNeto,
print wsctg.UsuarioSolicitante, wsctg.UsuarioReal
if '--consultar_excel' in sys.argv:
archivo = raw_input("Archivo a generar (planilla.xls): ") or \
'planilla.xls'
wsctg.LanzarExcepciones = True
ok = wsctg.ConsultarCTGExcel(fecha_emision_desde="01/04/2012",
archivo=archivo)
print "Errores:", wsctg.Errores
if '--consultar_constancia_pdf' in sys.argv:
i = sys.argv.index("--consultar_constancia_pdf")
if len(sys.argv) > i + 2 and not sys.argv[i+1].startswith("--"):
ctg = int(sys.argv[i+1])
archivo = sys.argv[i+2]
else:
if not ctg:
ctg = int(raw_input("Numero de CTG: ") or '0') or 83139794
archivo = raw_input("Archivo a generar (constancia.pdf): ") or \
'constancia.pdf'
wsctg.LanzarExcepciones = True
ok = wsctg.ConsultarConstanciaCTGPDF(ctg, archivo)
print "Errores:", wsctg.Errores
if "--pendientes" in sys.argv:
wsctg.LanzarExcepciones = True
wsctg.CTGsPendientesResolucion()
for clave in ("arrayCTGsRechazadosAResolver",
"arrayCTGsOtorgadosAResolver",
"arrayCTGsConfirmadosAResolver", ):
print clave[6:]
print "Numero CTG - Carta de Porte - Imprime Constancia - Estado"
while wsctg.LeerDatosCTG(clave):
print wsctg.NumeroCTG, wsctg.CartaPorte, wsctg.FechaHora
print wsctg.Destino, wsctg.Destinatario, wsctg.Observaciones
print "hecho."
except SoapFault,e:
print "Falla SOAP:", e.faultcode, e.faultstring.encode("ascii","ignore")
sys.exit(3)
except Exception, e:
ex = utils.exception_info()
print ex
if DEBUG:
raise
sys.exit(5)
```
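
The TXT interchange records above are packed field by field according to the `(name, length, type)` tuples in `ENCABEZADO`. The real helpers are `utils.leer`/`utils.escribir` (not shown in this file); the following is a hypothetical minimal sketch of the same fixed-width idea, with its own illustrative type markers:

```python
# Hypothetical sketch of fixed-width packing per (name, length, type) tuples;
# the actual implementation lives in utils.leer/escribir (not shown here).
N, A = 'Numerico', 'Alfanumerico'   # illustrative type markers

FORMATO = [('tipo_reg', 1, A), ('numero_ctg', 8, N), ('estado', 20, A)]

def escribir(dic, formato):
    "Pack a dict into one fixed-width line following the format tuples"
    campos = []
    for nombre, longitud, tipo in formato:
        valor = dic.get(nombre)
        valor = "" if valor is None else str(valor)
        if tipo == N:
            campos.append(valor.rjust(longitud, "0")[:longitud])  # zero-pad numbers
        else:
            campos.append(valor.ljust(longitud)[:longitud])       # space-pad text
    return "".join(campos) + "\n"

def leer(linea, formato):
    "Unpack one fixed-width line back into a dict"
    dic, comienzo = {}, 0
    for nombre, longitud, tipo in formato:
        dic[nombre] = linea[comienzo:comienzo + longitud].strip()
        comienzo += longitud
    return dic

linea = escribir({'tipo_reg': 0, 'numero_ctg': 49241727, 'estado': 'Vigente'}, FORMATO)
print repr(linea)           # '049241727Vigente             \n'
print leer(linea, FORMATO)  # {'tipo_reg': '0', 'numero_ctg': '49241727', 'estado': 'Vigente'}
```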
#### File: IronWeb/pyafipws/wsremcarne.py
```python
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright (C) 2018-2019 <NAME>"
__license__ = "LGPL 3.0"
__version__ = "1.02c"
LICENCIA = """
wsremcarne.py: Interfaz para generar Remito Electrónico Cárnico AFIP v3.0
Remito de Carnes y subproductos derivados de la faena de bovinos y porcinos
Resolución General 4256/18 y Resolución General 4303/18.
Copyright (C) 2018-2019 <NAME> <EMAIL>
http://www.sistemasagiles.com.ar/trac/wiki/RemitoElectronicoCarnico
Este programa es software libre, se entrega ABSOLUTAMENTE SIN GARANTIA
y es bienvenido a redistribuirlo bajo la licencia GPLv3.
Para información adicional sobre garantía, soporte técnico comercial
e incorporación/distribución en programas propietarios ver PyAfipWs:
http://www.sistemasagiles.com.ar/trac/wiki/PyAfipWs
"""
AYUDA="""
Opciones:
--ayuda: este mensaje
--debug: modo depuración (detalla y confirma las operaciones)
--prueba: genera y autoriza un remito de prueba (no usar en producción!)
--xml: almacena los requerimientos y respuestas XML (depuración)
--dummy: consulta estado de servidores
--generar: generar un remito
--emitir: emite un remito
--anular: anula un remito
--autorizar: autoriza un remito
--ult: consulta ultimo nro remito emitido
--consultar: consulta un remito generado
--tipos_comprobante: tabla de parametros para tipo de comprobante
--tipos_contingencia: tipo de contingencia que puede reportar
--tipos_categoria_emisor: tipos de categorías de emisor
--tipos_categoria_receptor: tipos de categorías de receptor
--tipos_estados: estados posibles en los que puede estar un remito cárnico
--grupos_carne: grupos de los distintos tipos de cortes de carne
--tipos_carne: tipos de corte de carne
--codigos_domicilio: codigos de depositos habilitados para el cuit
Ver wsremcarne.ini para parámetros de configuración (URL, certificados, etc.)
"""
import os, sys, time, base64
from utils import date
import traceback
from pysimplesoap.client import SoapFault
import utils
# import shared helper functions:
from utils import json, BaseWS, inicializar_y_capturar_excepciones, get_install_dir, json_serializer
# configuration constants (production/homologation):
WSDL = ["https://serviciosjava.afip.gob.ar/wsremcarne/RemCarneService?wsdl",
"https://fwshomo.afip.gov.ar/wsremcarne/RemCarneService?wsdl"]
DEBUG = False
XML = False
CONFIG_FILE = "wsremcarne.ini"
HOMO = False
ENCABEZADO = []
class WSRemCarne(BaseWS):
"Interfaz para el WebService de Remito Electronico Carnico (Version 3)"
_public_methods_ = ['Conectar', 'Dummy', 'SetTicketAcceso', 'DebugLog',
'GenerarRemito', 'EmitirRemito', 'AutorizarRemito', 'AnularRemito', 'ConsultarRemito',
'InformarContingencia', 'ModificarViaje', 'RegistrarRecepcion', 'ConsultarUltimoRemitoEmitido',
'CrearRemito', 'AgregarViaje', 'AgregarVehiculo', 'AgregarMercaderia',
'AgregarDatosAutorizacion', 'AgregarContingencias',
'ConsultarTiposCarne', 'ConsultarTiposCategoriaEmisor', 'ConsultarTiposCategoriaReceptor',
'ConsultarTiposComprobante', 'ConsultarTiposContingencia', 'ConsultarTiposEstado',
'ConsultarCodigosDomicilio', 'ConsultarGruposCarne', 'ConsultarPuntosEmision',
'SetParametros', 'SetParametro', 'GetParametro', 'AnalizarXml', 'ObtenerTagXml', 'LoadTestXML',
]
_public_attrs_ = ['XmlRequest', 'XmlResponse', 'Version', 'Traceback', 'Excepcion', 'LanzarExcepciones',
'Token', 'Sign', 'Cuit', 'AppServerStatus', 'DbServerStatus', 'AuthServerStatus',
'CodRemito', 'TipoComprobante', 'PuntoEmision',
'NroRemito', 'CodAutorizacion', 'FechaVencimiento', 'FechaEmision', 'Estado', 'Resultado', 'QR',
'ErrCode', 'ErrMsg', 'Errores', 'ErroresFormato', 'Observaciones', 'Obs', 'Evento', 'Eventos',
]
_reg_progid_ = "WSRemCarne"
_reg_clsid_ = "{71DB0CB9-2ED7-4226-A1E6-C3FA7FB18F41}"
# Global variables for BaseWS:
HOMO = HOMO
WSDL = WSDL[HOMO]
LanzarExcepciones = False
Version = "%s %s" % (__version__, HOMO and 'Homologación' or '')
def Conectar(self, *args, **kwargs):
ret = BaseWS.Conectar(self, *args, **kwargs)
return ret
def inicializar(self):
self.AppServerStatus = self.DbServerStatus = self.AuthServerStatus = None
self.CodRemito = self.TipoComprobante = self.PuntoEmision = None
self.NroRemito = self.CodAutorizacion = self.FechaVencimiento = self.FechaEmision = None
self.Estado = self.Resultado = self.QR = None
self.Errores = []
self.ErroresFormato = []
self.Observaciones = []
self.Eventos = []
self.Evento = self.ErrCode = self.ErrMsg = self.Obs = ""
def __analizar_errores(self, ret):
"Comprueba y extrae errores si existen en la respuesta XML"
self.Errores = [err['codigoDescripcion'] for err in ret.get('arrayErrores', [])]
self.ErroresFormato = [err['codigoDescripcionString'] for err in ret.get('arrayErroresFormato', [])]
errores = self.Errores + self.ErroresFormato
self.ErrCode = ' '.join(["%(codigo)s" % err for err in errores])
self.ErrMsg = '\n'.join(["%(codigo)s: %(descripcion)s" % err for err in errores])
def __analizar_observaciones(self, ret):
"Comprueba y extrae observaciones si existen en la respuesta XML"
self.Observaciones = [obs["codigoDescripcion"] for obs in ret.get('arrayObservaciones', [])]
self.Obs = '\n'.join(["%(codigo)s: %(descripcion)s" % obs for obs in self.Observaciones])
def __analizar_evento(self, ret):
"Comprueba y extrae el wvento informativo si existen en la respuesta XML"
evt = ret.get('evento')
if evt:
self.Eventos = [evt]
self.Evento = "%(codigo)s: %(descripcion)s" % evt
@inicializar_y_capturar_excepciones
def CrearRemito(self, tipo_comprobante, punto_emision, tipo_movimiento, categoria_emisor, cuit_titular_mercaderia, cod_dom_origen,
tipo_receptor, categoria_receptor=None, cuit_receptor=None, cuit_depositario=None,
cod_dom_destino=None, cod_rem_redestinar=None, cod_remito=None, estado=None,
**kwargs):
"Inicializa internamente los datos de un remito para autorizar"
self.remito = {'tipoComprobante': tipo_comprobante, 'puntoEmision': punto_emision, 'categoriaEmisor': categoria_emisor,
'cuitTitularMercaderia': cuit_titular_mercaderia, 'cuitDepositario': cuit_depositario,
'tipoReceptor': tipo_receptor, 'categoriaReceptor': categoria_receptor, 'cuitReceptor': cuit_receptor,
'codDomOrigen': cod_dom_origen, 'codDomDestino': cod_dom_destino, 'tipoMovimiento': tipo_movimiento,
'estado': estado, 'codRemito': cod_remito,
'codRemRedestinado': cod_rem_redestinar,
'arrayMercaderias': [], 'arrayContingencias': [],
}
return True
@inicializar_y_capturar_excepciones
def AgregarViaje(self, cuit_transportista=None, cuit_conductor=None, fecha_inicio_viaje=None, distancia_km=None, **kwargs):
"Agrega la información referente al viaje del remito electrónico cárnico"
self.remito['viaje'] = {'cuitTransportista': cuit_transportista,
'cuitConductor': cuit_conductor,
'fechaInicioViaje': fecha_inicio_viaje ,
'distanciaKm': distancia_km,
'vehiculo': {}
}
return True
@inicializar_y_capturar_excepciones
def AgregarVehiculo(self, dominio_vehiculo=None, dominio_acoplado=None, **kwargs):
"Agrega la información referente al vehiculo usado en el viaje del remito electrónico cárnico"
self.remito['viaje']['vehiculo'] = {'dominioVehiculo': dominio_vehiculo, 'dominioAcoplado': dominio_acoplado}
return True
@inicializar_y_capturar_excepciones
def AgregarMercaderia(self, orden=None, cod_tipo_prod=None, kilos=None, unidades=None, tropa=None, kilos_rec=None, unidades_rec=None, **kwargs):
"Agrega la información referente a la mercadería del remito electrónico cárnico"
mercaderia = dict(orden=orden, tropa=tropa, codTipoProd=cod_tipo_prod, kilos=kilos, unidades=unidades,
kilosRec=kilos_rec, unidadesRec=unidades_rec)
self.remito['arrayMercaderias'].append(dict(mercaderia=mercaderia))
return True
@inicializar_y_capturar_excepciones
def AgregarDatosAutorizacion(self, nro_remito=None, cod_autorizacion=None, fecha_emision=None, fecha_vencimiento=None, **kwargs):
"Agrega la información referente a los datos de autorización del remito electrónico cárnico"
self.remito['datosEmision'] = dict(nroRemito=nro_remito, codAutorizacion=cod_autorizacion,
fechaEmision=fecha_emision, fechaVencimiento=fecha_vencimiento,
)
return True
@inicializar_y_capturar_excepciones
def AgregarContingencias(self, tipo=None, observacion=None, **kwargs):
"Agrega la información referente a los opcionales de la liq. seq."
contingencia = dict(tipoContingencia=tipo, observacion=observacion)
self.remito['arrayContingencias'].append(dict(contingencia=contingencia))
return True
@inicializar_y_capturar_excepciones
def GenerarRemito(self, id_req, archivo="qr.png"):
"Informar los datos necesarios para la generación de un remito nuevo"
if not self.remito.get('arrayContingencias'):
if 'arrayContingencias' in self.remito:
del self.remito['arrayContingencias']
response = self.client.generarRemito(
authRequest={'token': self.Token, 'sign': self.Sign, 'cuitRepresentada': self.Cuit},
idReq=id_req, remito=self.remito)
ret = response.get("generarRemitoReturn")
if ret:
self.__analizar_errores(ret)
self.__analizar_observaciones(ret)
self.__analizar_evento(ret)
self.AnalizarRemito(ret, archivo)
return bool(self.CodRemito)
def AnalizarRemito(self, ret, archivo=None):
"Extrae el resultado del remito, si existen en la respuesta XML"
if ret:
self.CodRemito = ret.get("codRemito")
self.TipoComprobante = ret.get("tipoComprobante")
self.PuntoEmision = ret.get("puntoEmision")
datos_aut = ret.get('datosEmision')
if datos_aut:
self.NroRemito = datos_aut.get('nroRemito')
self.CodAutorizacion = str(datos_aut.get('codAutorizacion'))
self.FechaEmision = datos_aut.get('fechaEmision')
self.FechaVencimiento = datos_aut.get('fechaVencimiento')
self.Estado = ret.get('estado')
self.Resultado = ret.get('resultado')
self.QR = ret.get('qr') or ""
if archivo:
f = open(archivo, "wb")
f.write(self.QR)
f.close()
@inicializar_y_capturar_excepciones
def EmitirRemito(self, archivo="qr.png"):
"Emitir Remitos que se encuentren en estado Pendiente de Emitir."
response = self.client.emitirRemito(
authRequest={'token': self.Token, 'sign': self.Sign, 'cuitRepresentada': self.Cuit},
codRemito=self.remito['codRemito'],
viaje=self.remito.get('viaje'))
ret = response.get("emitirRemitoReturn")
if ret:
self.__analizar_errores(ret)
self.__analizar_observaciones(ret)
self.__analizar_evento(ret)
self.AnalizarRemito(ret, archivo)
return bool(self.CodRemito)
@inicializar_y_capturar_excepciones
def AutorizarRemito(self, archivo="qr.png"):
"Autorizar o denegar un remito (cuando corresponde autorizacion) por parte del titular/depositario"
response = self.client.autorizarRemito(
authRequest={'token': self.Token, 'sign': self.Sign, 'cuitRepresentada': self.Cuit},
codRemito=self.remito['codRemito'],
estado=self.remito['estado'])
ret = response.get("autorizarRemitoReturn")
if ret:
self.__analizar_errores(ret)
self.__analizar_observaciones(ret)
self.__analizar_evento(ret)
self.AnalizarRemito(ret, archivo)
return bool(self.CodRemito)
@inicializar_y_capturar_excepciones
def AnularRemito(self):
"Anular un remito generado que aún no haya sido emitido"
response = self.client.anularRemito(
authRequest={'token': self.Token, 'sign': self.Sign, 'cuitRepresentada': self.Cuit},
codRemito=self.remito['codRemito'])
ret = response.get("anularRemitoReturn")
if ret:
self.__analizar_errores(ret)
self.__analizar_observaciones(ret)
self.__analizar_evento(ret)
self.AnalizarRemito(ret)
return bool(self.CodRemito)
@inicializar_y_capturar_excepciones
def ConsultarUltimoRemitoEmitido(self, tipo_comprobante=995, punto_emision=1):
"Obtener el último número de remito que se emitió por tipo de comprobante y punto de emisión"
response = self.client.consultarUltimoRemitoEmitido(
authRequest={'token': self.Token, 'sign': self.Sign, 'cuitRepresentada': self.Cuit},
tipoComprobante=tipo_comprobante,
puntoEmision=punto_emision)
ret = response.get("consultarUltimoRemitoReturn", {})
id_req = ret.get("idReq", 0)
rec = ret.get("remito", {})
self.__analizar_errores(ret)
self.__analizar_observaciones(ret)
self.__analizar_evento(ret)
self.AnalizarRemito(rec)
return id_req
@inicializar_y_capturar_excepciones
def ConsultarRemito(self, cod_remito=None, id_req=None,
tipo_comprobante=None, punto_emision=None, nro_comprobante=None, cuit_emisor=None):
"Obtener los datos de un remito generado"
response = self.client.consultarRemito(
authRequest={'token': self.Token, 'sign': self.Sign, 'cuitRepresentada': self.Cuit},
codRemito=cod_remito,
idReq=id_req,
cuitEmisor=cuit_emisor,
tipoComprobante=tipo_comprobante,
puntoEmision=punto_emision,
nroComprobante=nro_comprobante)
ret = response.get("consultarRemitoReturn", {})
id_req = ret.get("idReq", 0)
self.remito = rec = ret.get("remito", {})
self.__analizar_errores(ret)
self.__analizar_observaciones(ret)
self.__analizar_evento(ret)
self.AnalizarRemito(rec)
return id_req
@inicializar_y_capturar_excepciones
def Dummy(self):
"Obtener el estado de los servidores de la AFIP"
results = self.client.dummy()['dummyReturn']
self.AppServerStatus = str(results['appserver'])
self.DbServerStatus = str(results['dbserver'])
self.AuthServerStatus = str(results['authserver'])
@inicializar_y_capturar_excepciones
def ConsultarTiposComprobante(self, sep="||"):
"Obtener el código y descripción para tipo de comprobante"
ret = self.client.consultarTiposComprobante(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
)['consultarTiposComprobanteReturn']
self.__analizar_errores(ret)
array = ret.get('arrayTiposComprobante', [])
lista = [it['codigoDescripcion'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista]
@inicializar_y_capturar_excepciones
def ConsultarTiposContingencia(self, sep="||"):
"Obtener el código y descripción para cada tipo de contingencia que puede reportar"
ret = self.client.consultarTiposContingencia(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
)['consultarTiposContingenciaReturn']
self.__analizar_errores(ret)
array = ret.get('arrayTiposContingencia', [])
lista = [it['codigoDescripcion'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista]
@inicializar_y_capturar_excepciones
def ConsultarTiposCategoriaEmisor(self, sep="||"):
"Obtener el código y descripción para tipos de categorías de emisor"
ret = self.client.consultarTiposCategoriaEmisor(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
)['consultarCategoriasEmisorReturn']
self.__analizar_errores(ret)
array = ret.get('arrayCategoriasEmisor', [])
lista = [it['codigoDescripcionString'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista]
@inicializar_y_capturar_excepciones
def ConsultarTiposCategoriaReceptor(self, sep="||"):
"Obtener el código y descripción para cada tipos de categorías de receptor"
ret = self.client.consultarTiposCategoriaReceptor(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
)['consultarCategoriasReceptorReturn']
self.__analizar_errores(ret)
array = ret.get('arrayCategoriasReceptor', [])
lista = [it['codigoDescripcionString'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista]
@inicializar_y_capturar_excepciones
def ConsultarTiposEstado(self, sep="||"):
"Obtener el código y descripción para cada estado posibles en los que puede estar un remito cárnico"
ret = self.client.consultarTiposEstado(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
)['consultarTiposEstadoReturn']
self.__analizar_errores(ret)
array = ret.get('arrayTiposEstado', [])
lista = [it['codigoDescripcionString'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista]
@inicializar_y_capturar_excepciones
def ConsultarGruposCarne(self, sep="||"):
"Obtener el código y descripción para los grupos de los distintos tipos de cortes de carne"
ret = self.client.consultarGruposCarne(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
)['consultarGruposCarneReturn']
self.__analizar_errores(ret)
array = ret.get('arrayGruposCarne', [])
lista = [it['codigoDescripcionString'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista]
@inicializar_y_capturar_excepciones
def ConsultarTiposCarne(self, cod_grupo_carne=1, sep="||"):
"Obtener el código y descripción para tipos de corte de carne"
ret = self.client.consultarTiposCarne(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
codGrupoCarne=cod_grupo_carne,
)['consultarTiposCarneReturn']
self.__analizar_errores(ret)
array = ret.get('arrayTiposCarne', [])
lista = [it['codigoDescripcionString'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista]
@inicializar_y_capturar_excepciones
def ConsultarCodigosDomicilio(self, cuit_titular=1, sep="||"):
"Obtener el código de depositos que tiene habilitados para operar el cuit informado"
ret = self.client.consultarCodigosDomicilio(
authRequest={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentada': self.Cuit, },
cuitTitularDomicilio=cuit_titular,
)['consultarCodigosDomicilioReturn']
self.__analizar_errores(ret)
array = ret.get('arrayDomicilios', [])
lista = [it['codigoDescripcion'] for it in array]
return [(u"%s {codigo} %s {descripcion} %s" % (sep, sep, sep)).format(**it) if sep else it for it in lista]
# find the installation directory (global so it does not change if another dll is used)
if not hasattr(sys, "frozen"):
basepath = __file__
elif sys.frozen == 'dll':
import win32api
basepath = win32api.GetModuleFileName(sys.frozendllhandle)
else:
basepath = sys.executable
INSTALL_DIR = WSRemCarne.InstallDir = get_install_dir()
if __name__ == '__main__':
if '--ayuda' in sys.argv:
print LICENCIA
print AYUDA
sys.exit(0)
if "--register" in sys.argv or "--unregister" in sys.argv:
import win32com.server.register
win32com.server.register.UseCommandLine(WSRemCarne)
sys.exit(0)
from ConfigParser import SafeConfigParser
try:
if "--version" in sys.argv:
print "Versión: ", __version__
for arg in sys.argv[1:]:
if arg.startswith("--"):
break
print "Usando configuración:", arg
CONFIG_FILE = arg
config = SafeConfigParser()
config.read(CONFIG_FILE)
CERT = config.get('WSAA','CERT')
PRIVATEKEY = config.get('WSAA','PRIVATEKEY')
CUIT = config.get('WSRemCarne','CUIT')
ENTRADA = config.get('WSRemCarne','ENTRADA')
SALIDA = config.get('WSRemCarne','SALIDA')
if config.has_option('WSAA','URL') and not HOMO:
wsaa_url = config.get('WSAA','URL')
else:
wsaa_url = None
if config.has_option('WSRemCarne','URL') and not HOMO:
wsremcarne_url = config.get('WSRemCarne','URL')
else:
wsremcarne_url = WSDL[HOMO]
if config.has_section('DBF'):
conf_dbf = dict(config.items('DBF'))
if DEBUG: print "conf_dbf", conf_dbf
else:
conf_dbf = {}
DEBUG = '--debug' in sys.argv
XML = '--xml' in sys.argv
if DEBUG:
print "Usando Configuración:"
print "wsaa_url:", wsaa_url
print "wsremcarne_url:", wsremcarne_url
# obtaining the access ticket (TA)
from wsaa import WSAA
wsaa = WSAA()
ta = wsaa.Autenticar("wsremcarne", CERT, PRIVATEKEY, wsaa_url, debug=DEBUG)
if not ta:
sys.exit("Imposible autenticar con WSAA: %s" % wsaa.Excepcion)
# SOAP client for the web service
wsremcarne = WSRemCarne()
wsremcarne.Conectar(wsdl=wsremcarne_url)
wsremcarne.SetTicketAcceso(ta)
wsremcarne.Cuit = CUIT
ok = None
if '--dummy' in sys.argv:
ret = wsremcarne.Dummy()
print "AppServerStatus", wsremcarne.AppServerStatus
print "DbServerStatus", wsremcarne.DbServerStatus
print "AuthServerStatus", wsremcarne.AuthServerStatus
sys.exit(0)
if '--ult' in sys.argv:
try:
pto_emision = int(sys.argv[sys.argv.index("--ult") + 1])
except (IndexError, ValueError):
pto_emision = 1
try:
tipo_comprobante = int(sys.argv[sys.argv.index("--ult") + 2])
except (IndexError, ValueError):
tipo_comprobante = 995
rec = {}
print "Consultando ultimo remito pto_emision=%s tipo_comprobante=%s" % (pto_emision, tipo_comprobante)
ok = wsremcarne.ConsultarUltimoRemitoEmitido(tipo_comprobante, pto_emision)
if wsremcarne.Excepcion:
print >> sys.stderr, "EXCEPCION:", wsremcarne.Excepcion
if DEBUG: print >> sys.stderr, wsremcarne.Traceback
print "Ultimo Nro de Remito", wsremcarne.NroRemito
print "Errores:", wsremcarne.Errores
if '--consultar' in sys.argv:
try:
cod_remito = sys.argv[sys.argv.index("--consultar") + 1]
except (IndexError, ValueError):
cod_remito = None
rec = {}
print "Consultando remito cod_remito=%s" % (cod_remito, )
ok = wsremcarne.ConsultarRemito(cod_remito)
if wsremcarne.Excepcion:
print >> sys.stderr, "EXCEPCION:", wsremcarne.Excepcion
if DEBUG: print >> sys.stderr, wsremcarne.Traceback
print "Ultimo Nro de Remito", wsremcarne.NroRemito
print "Errores:", wsremcarne.Errores
if DEBUG:
import pprint
pprint.pprint(wsremcarne.remito)
if '--prueba' in sys.argv:
rec = dict(tipo_comprobante=995, punto_emision=1, categoria_emisor=1,
tipo_movimiento='ENV', # ENV: normal dispatch, PLA: pickup at plant, REP: delivery round, RED: redestination
cuit_titular_mercaderia='20222222223', cod_dom_origen=1,
tipo_receptor='EM', # 'EM': emitting warehouse, 'MI': domestic market, 'RP': distribution
categoria_receptor=1, id_req=int(time.time()),
cuit_receptor='20111111112', cuit_depositario=None,
cod_dom_destino=1, cod_rem_redestinar=None,
cod_remito=30,
)
if "--autorizar" in sys.argv:
rec["estado"] = 'A' # 'A': Autorizar, 'D': Denegar
rec['viaje'] = dict(cuit_transportista='20333333334', cuit_conductor='20333333334',
fecha_inicio_viaje='2018-10-01', distancia_km=999)
rec['viaje']['vehiculo'] = dict(dominio_vehiculo='AAA000', dominio_acoplado='ZZZ000')
rec['mercaderias'] = [dict(orden=1, tropa=1, cod_tipo_prod='2.13', kilos=10, unidades=1)]
rec['datos_autorizacion'] = None # dict(nro_remito=None, cod_autorizacion=None, fecha_emision=None, fecha_vencimiento=None)
rec['contingencias'] = [dict(tipo=1, observacion="anulacion")]
with open(ENTRADA, "w") as archivo:
json.dump(rec, archivo, sort_keys=True, indent=4)
if '--cargar' in sys.argv:
with open(ENTRADA, "r") as archivo:
rec = json.load(archivo)
wsremcarne.CrearRemito(**rec)
wsremcarne.AgregarViaje(**rec['viaje'])
wsremcarne.AgregarVehiculo(**rec['viaje']['vehiculo'])
for mercaderia in rec['mercaderias']:
wsremcarne.AgregarMercaderia(**mercaderia)
datos_aut = rec['datos_autorizacion']
if datos_aut:
wsremcarne.AgregarDatosAutorizacion(**datos_aut)
for contingencia in rec['contingencias']:
wsremcarne.AgregarContingencias(**contingencia)
if '--generar' in sys.argv:
if '--testing' in sys.argv:
wsremcarne.LoadTestXML("tests/xml/wsremcarne.xml") # load canned response
ok = wsremcarne.GenerarRemito(id_req=rec['id_req'], archivo="qr.jpg")
if '--emitir' in sys.argv:
ok = wsremcarne.EmitirRemito()
if '--autorizar' in sys.argv:
ok = wsremcarne.AutorizarRemito()
if '--anular' in sys.argv:
ok = wsremcarne.AnularRemito()
if ok is not None:
print "Resultado: ", wsremcarne.Resultado
print "Cod Remito: ", wsremcarne.CodRemito
if wsremcarne.CodAutorizacion:
print "Numero Remito: ", wsremcarne.NroRemito
print "Cod Autorizacion: ", wsremcarne.CodAutorizacion
print "Fecha Emision", wsremcarne.FechaEmision
print "Fecha Vencimiento", wsremcarne.FechaVencimiento
print "Estado: ", wsremcarne.Estado
print "Observaciones: ", wsremcarne.Observaciones
print "Errores:", wsremcarne.Errores
print "Errores Formato:", wsremcarne.ErroresFormato
print "Evento:", wsremcarne.Evento
rec['nro_remito'] = wsremcarne.NroRemito
rec['cod_autorizacion'] = wsremcarne.CodAutorizacion
rec['cod_remito'] = wsremcarne.CodRemito
rec['resultado'] = wsremcarne.Resultado
rec['observaciones'] = wsremcarne.Observaciones
rec['fecha_emision'] = wsremcarne.FechaEmision
rec['fecha_vencimiento'] = wsremcarne.FechaVencimiento
rec['errores'] = wsremcarne.Errores
rec['errores_formato'] = wsremcarne.ErroresFormato
rec['evento'] = wsremcarne.Evento
if '--grabar' in sys.argv:
with open(SALIDA, "w") as archivo:
json.dump(rec, archivo, sort_keys=True, indent=4, default=json_serializer)
# Retrieve parameter tables:
if '--tipos_comprobante' in sys.argv:
ret = wsremcarne.ConsultarTiposComprobante()
print "\n".join(ret)
if '--tipos_contingencia' in sys.argv:
ret = wsremcarne.ConsultarTiposContingencia()
print "\n".join(ret)
if '--tipos_categoria_emisor' in sys.argv:
ret = wsremcarne.ConsultarTiposCategoriaEmisor()
print "\n".join(ret)
if '--tipos_categoria_receptor' in sys.argv:
ret = wsremcarne.ConsultarTiposCategoriaReceptor()
print "\n".join(ret)
if '--tipos_estados' in sys.argv:
ret = wsremcarne.ConsultarTiposEstado()
print "\n".join(ret)
if '--grupos_carne' in sys.argv:
ret = wsremcarne.ConsultarGruposCarne()
print "\n".join(ret)
if '--tipos_carne' in sys.argv:
for grupo_carne in wsremcarne.ConsultarGruposCarne(sep=None):
ret = wsremcarne.ConsultarTiposCarne(grupo_carne['codigo'])
print "\n".join(ret)
if '--codigos_domicilio' in sys.argv:
cuit = raw_input("Cuit Titular Domicilio: ")
ret = wsremcarne.ConsultarCodigosDomicilio(cuit)
print "\n".join(ret)
if wsremcarne.Errores or wsremcarne.ErroresFormato:
print "Errores:", wsremcarne.Errores, wsremcarne.ErroresFormato
print "hecho."
except SoapFault, e:
print "Falla SOAP:", e.faultcode, e.faultstring.encode("ascii", "ignore")
sys.exit(3)
except Exception, e:
ex = utils.exception_info()
print ex
if DEBUG:
raise
sys.exit(5)
```
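For reference, a minimal happy-path sketch of driving the class above, mirroring the `__main__` flow: the certificate, key, and CUIT values are placeholders, and the call runs against the homologation endpoint rather than production.
```python
# Sketch only: placeholder credentials/CUIT, homologation WSDL (index 1).
from wsaa import WSAA
from wsremcarne import WSRemCarne, WSDL

wsaa = WSAA()
ta = wsaa.Autenticar("wsremcarne", "cert.crt", "key.key")  # placeholders
ws = WSRemCarne()
ws.Conectar(wsdl=WSDL[1])
ws.SetTicketAcceso(ta)
ws.Cuit = "20111111112"  # placeholder CUIT
ws.CrearRemito(tipo_comprobante=995, punto_emision=1, tipo_movimiento="ENV",
               categoria_emisor=1, cuit_titular_mercaderia="20222222223",
               cod_dom_origen=1, tipo_receptor="EM", cod_dom_destino=1)
ws.AgregarMercaderia(orden=1, cod_tipo_prod="2.13", kilos=10, unidades=1)
if ws.GenerarRemito(id_req=1):
    print(ws.CodRemito, ws.NroRemito)
else:
    print(ws.Errores, ws.ErroresFormato)
```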
#### File: IronWeb/trabajos/models.py
```python
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
# from .utilidades import ESTADO,TIPO_CANCHA,TIPOUSR,ESTADO_CUOTA,TRIBUTO_CUOTA,TIPO_LOGIN
from django.contrib.auth.models import User
from datetime import datetime,date
from dateutil.relativedelta import *
from django.conf import settings
import os
from .utilidades import *
from comprobantes.models import cpb_estado,cpb_tipo,cpb_comprobante
from general.models import gral_empresa
from usuarios.models import usu_usuario
import os
def get_image_name(instance, filename):
f, ext = os.path.splitext(filename)
archivo = filename
return os.path.join('empresa', archivo)
class orden_pedido(models.Model):
id = models.AutoField(primary_key=True,db_index=True)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',blank=True, null=True,on_delete=models.SET_NULL)
cliente = models.ForeignKey('entidades.egr_entidad',db_column='cliente',related_name='op_cliente',blank=True, null=True,on_delete=models.SET_NULL) # client/supplier
vendedor = models.ForeignKey('entidades.egr_entidad',db_column='vendedor',related_name='op_vendedor',blank=True, null=True,on_delete=models.SET_NULL)
fecha_creacion = models.DateTimeField(auto_now_add = True,blank=True, null=True)
fecha = models.DateField('Fecha Orden')
fecha_vto = models.DateField('Fecha',blank=True, null=True)
numero = models.CharField(u'Número',max_length=50)
ped_mostrador = models.BooleanField('Mostrador',default=False)
ped_webface = models.BooleanField('Web/Faceb',default=False)
ped_comercial = models.BooleanField('Comercial',default=False)
ped_email = models.BooleanField('Email',default=False)
estado = models.ForeignKey(cpb_estado,related_name='op_estado',blank=True, null=True,on_delete=models.SET_NULL)
impres_laser = models.BooleanField(u'Láser',default=False)
impres_latex = models.BooleanField('Latex',default=False)
impres_rotulado = models.BooleanField('Rotulado',default=False)
impres_offset = models.BooleanField('Offset',default=False)
impres_corporeo = models.BooleanField(u'Corpóreo',default=False)
impres_disenio = models.BooleanField(u'Diseño',default=False)
impres_ploteo_papel = models.BooleanField('Ploteo Papel',default=False)
impres_facturero = models.BooleanField('Facturero',default=False)
impres_sellos = models.BooleanField('Sellos',default=False)
impres_imprbyn = models.BooleanField('Impr.B/N',default=False)
term_cortado = models.BooleanField('Cortado',default=False)
term_troquelado = models.BooleanField('Troquelado',default=False)
term_abrochado = models.BooleanField('Abrochado',default=False)
term_engomado = models.BooleanField('Engomado',default=False)
term_plegado = models.BooleanField(u'Plegado',default=False)
term_arandelas = models.BooleanField(u'Arandelas',default=False)
term_bolsillos = models.BooleanField('Bolsillos',default=False)
term_plastificado = models.BooleanField('Plastificado',default=False)
term_imp_corte = models.BooleanField('Imp.y Corte',default=False)
term_anillado = models.BooleanField('Anillado',default=False)
archivo_enviado = models.CharField(u'Archivo/Medio',max_length=100,blank=True, null=True)
fecha_entrega = models.DateField(blank=True, null=True)
hora_entrega = models.TimeField(blank=True, null=True)
muestra_enviada = models.ForeignKey('entidades.egr_entidad',db_column='tercerizado',related_name='op_tercerizado',blank=True, null=True,on_delete=models.SET_NULL) # client/supplier
firma_conformidad = models.BooleanField('Firmado/Aceptado',default=False)
importe_total = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True,default=0) # sum of all line items
detalle = models.TextField(max_length=1000, blank=True, null=True) # Field name made lowercase.
id_presupuesto = models.ForeignKey(cpb_comprobante,verbose_name=u'CPB', db_column='id_presupuesto',related_name='op_presupuesto',blank=True, null=True,on_delete=models.SET_NULL)
fecha_pendiente = models.DateField('Fecha',blank=True, null=True)
fecha_proceso = models.DateField('Fecha',blank=True, null=True)
fecha_terminado = models.DateField('Fecha',blank=True, null=True)
fecha_entregado = models.DateField('Fecha',blank=True, null=True)
usuario = models.ForeignKey(usu_usuario,db_column='usuario',blank=True, null=True,related_name='op_usuario',on_delete=models.SET_NULL)
id_venta = models.ForeignKey(cpb_comprobante,verbose_name=u'Venta', db_column='id_venta',related_name='op_venta',blank=True, null=True,on_delete=models.SET_NULL)
class Meta:
db_table = 'trab_orden_pedido'
def __unicode__(self):
return u'%s' % (self.numero)
def _generaOT(self):
pedidos = orden_trabajo.objects.filter(orden_pedido=self)
return not pedidos.exists()
generaOT = property(_generaOT)
class orden_pedido_detalle(models.Model):
id = models.AutoField(primary_key=True,db_index=True)
orden_pedido = models.ForeignKey('orden_pedido',verbose_name=u'Nº Orden', db_column='orden_pedido',blank=True, null=True)
producto = models.ForeignKey('productos.prod_productos',db_column='producto',related_name='op_producto',blank=True, null=True,on_delete=models.SET_NULL)
cantidad = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True,default=1)
importe_unitario = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True,default=0)
importe_total = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True,default=0)
fecha_creacion = models.DateTimeField(auto_now_add = True)
detalle = models.TextField(max_length=1000,blank=True, null=True) # Field name made lowercase.
origen_destino = models.ForeignKey('productos.prod_ubicacion',verbose_name='Origen/Destino', db_column='op_origen_destino',blank=True, null=True,on_delete=models.SET_NULL)
lista_precios = models.ForeignKey('productos.prod_lista_precios',db_column='lista_precios',related_name='op_lista_precios',blank=True, null=True,on_delete=models.SET_NULL)
class Meta:
db_table = 'trab_op_detalle'
def __unicode__(self):
return u'%s-%s' % (self.producto,self.cantidad)
class orden_trabajo(models.Model):
id = models.AutoField(primary_key=True,db_index=True)
orden_pedido = models.ForeignKey('orden_pedido',verbose_name=u'Nº Orden', db_column='orden_pedido',blank=True, null=True,on_delete=models.SET_NULL)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',blank=True, null=True,on_delete=models.SET_NULL)
responsable = models.ForeignKey('entidades.egr_entidad',db_column='vendedor',related_name='ot_vendedor',blank=True, null=True,on_delete=models.SET_NULL)
fecha_creacion = models.DateTimeField(auto_now_add = True,blank=True, null=True)
fecha = models.DateField('Fecha Orden')
fecha_estimada = models.DateField('Fecha Estimada',blank=True, null=True)
numero = models.CharField(u'Número',max_length=50)
estado = models.ForeignKey(cpb_estado,related_name='ot_estado',blank=True, null=True,on_delete=models.SET_NULL)
detalle = models.TextField(max_length=1000, blank=True, null=True) # Field name made lowercase.
fecha_terminado = models.DateField('Fecha',blank=True, null=True)
usuario = models.ForeignKey(usu_usuario,db_column='usuario',blank=True, null=True,related_name='ot_usuario',on_delete=models.SET_NULL)
class Meta:
db_table = 'trab_orden_trabajo'
def __unicode__(self):
return u'%s' % (self.numero)
class orden_trabajo_detalle(models.Model):
id = models.AutoField(primary_key=True,db_index=True)
orden_trabajo = models.ForeignKey('orden_trabajo',verbose_name=u'Nº Orden', db_column='orden_trabajo',blank=True, null=True)
producto = models.ForeignKey('productos.prod_productos',db_column='producto',related_name='ot_producto',blank=True, null=True,on_delete=models.SET_NULL)
cantidad = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True,default=1)
fecha_creacion = models.DateTimeField(auto_now_add = True)
detalle = models.TextField(max_length=1000,blank=True, null=True) # Field name made lowercase.
class Meta:
db_table = 'trab_ot_detalle'
def __unicode__(self):
return u'%s-%s' % (self.producto,self.cantidad)
class orden_colocacion(models.Model):
id = models.AutoField(primary_key=True,db_index=True)
orden_trabajo = models.ForeignKey('orden_trabajo', db_column='id_orden_trabajo',related_name="ot_padre",blank=True, null=True,on_delete=models.SET_NULL)
empresa = models.ForeignKey('general.gral_empresa', db_column='empresa',blank=True, null=True,on_delete=models.SET_NULL)
estado = models.ForeignKey(cpb_estado,related_name='oc_estado',blank=True, null=True,on_delete=models.SET_NULL)
fecha_creacion = models.DateTimeField(auto_now_add = True,blank=True, null=True)
fecha_colocacion = models.DateField(blank=True, null=True)
hora_colocacion = models.TimeField(blank=True, null=True)
vendedor = models.ForeignKey('entidades.egr_entidad',db_column='vendedor',related_name='oc_vendedor',blank=True, null=True,on_delete=models.SET_NULL)
colocador = models.ForeignKey('entidades.egr_entidad',db_column='colocador',related_name='oc_colocador',blank=True, null=True,on_delete=models.SET_NULL)
fecha_vto = models.DateField('Fecha',blank=True, null=True)
numero = models.CharField(u'Número',max_length=50)
detalle = models.TextField(max_length=1000, blank=True, null=True) # Field name made lowercase.
fecha_colocado = models.DateField('Fecha',blank=True, null=True)
usuario = models.ForeignKey(usu_usuario,db_column='usuario',blank=True, null=True,related_name='oc_usuario',on_delete=models.SET_NULL)
class Meta:
db_table = 'trab_orden_colocacion'
def __unicode__(self):
return u'%s' % (self.numero)
from django.db.models.signals import post_save,post_delete
from django.dispatch import receiver
@receiver(post_save, sender=orden_pedido,dispatch_uid="actualizar_ultimo_nro_op")
def actualizar_ultimo_nro_op(sender, instance,created, **kwargs):
if created:
nro = int(instance.numero)
tipo = cpb_tipo.objects.get(pk=15)
tipo.ultimo_nro = nro
tipo.save()
```
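The `post_save` receiver at the end of the file keeps `cpb_tipo.ultimo_nro` in sync with each newly created order. Below is a standalone sketch of that pattern, with hypothetical `Counter`/`Order` models standing in for `cpb_tipo`/`orden_pedido`; it assumes placement inside an installed Django app.
```python
# Hypothetical models; same signal pattern as actualizar_ultimo_nro_op above.
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver

class Counter(models.Model):
    ultimo_nro = models.IntegerField(default=0)

class Order(models.Model):
    numero = models.CharField(max_length=50)

@receiver(post_save, sender=Order, dispatch_uid="bump_counter_on_create")
def bump_counter(sender, instance, created, **kwargs):
    # dispatch_uid prevents double registration if the module is imported twice
    if created:
        counter, _ = Counter.objects.get_or_create(pk=1)
        counter.ultimo_nro = int(instance.numero)
        counter.save()
```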
#### File: IronWeb/varios/mem.py
```python
import subprocess
import sys
CMD = "ps -o rss,command -u %s | grep -v peruser | awk '{sum += $1} END {print sum / 1024}'"
MEM = {}
def main():
proc = subprocess.Popen('groups', shell=True, stdout=subprocess.PIPE)
proc.wait()
stdout = proc.stdout.read()
for user in stdout.split():
proc = subprocess.Popen(CMD % user, shell=True, stdout=subprocess.PIPE)
proc.wait()
MEM[user] = int(float(proc.stdout.read()))
print
print 'Total Memory Usage: %i MB' % sum(MEM.values())
print
for user in sorted(MEM.keys()):
print user.ljust(15), str(MEM[user]).rjust(3), 'MB'
print
print 'Note: "Total Memory Usage" is only valid when you execute mem using your account\'s primary SSH user.'
print
if __name__ == '__main__':
main()
``` |
{
"source": "JMJAC/Rango",
"score": 2
} |
#### File: tango_project/rango/views.py
```python
from django.shortcuts import render, HttpResponseRedirect, HttpResponse, reverse, redirect
from rango.models import Category, Page, User, UserProfile
from rango.forms import CategoryForm, PageForm, UserForm, UserProfileForm
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from datetime import datetime
from rango.webhose_search import run_query
# A Helper method
def visitor_cookie_handler(request):
visits = int(get_server_side_cookies(request, 'visits', '1'))
last_visit_cookie = get_server_side_cookies(request, 'last_visit', str(datetime.now()))
last_visit_time = datetime.strptime(last_visit_cookie[:-7], '%Y-%m-%d %H:%M:%S')
if (datetime.now() - last_visit_time).days > 0:
visits += 1
request.session['last_visit'] = str(datetime.now())
else:
request.session['last_visit'] = last_visit_cookie
request.session['visits'] = visits
# A Helper method
def get_server_side_cookies(request, cookie, default_val=None):
val = request.session.get(cookie)
if not val:
val = default_val
return val
def index(request):
visitor_cookie_handler(request)
context_dict = {'categories': Category.objects.order_by('-likes')[:5], 'pages': Page.objects.order_by('-views')[:5], 'visits': request.session['visits']}
return render(request, 'rango/index.html', context_dict)
def about(request):
context_dict = {'name': 'JMJAC'}
return render(request, 'rango/about.html', context_dict)
def show_category(request, category_name_slug):
context_dict = {'result_list': []}
try:
category = Category.objects.get(slug=category_name_slug)
pages = Page.objects.filter(category=category).order_by('-views')
context_dict['category'] = category
context_dict['pages'] = pages
except Category.DoesNotExist:
context_dict['category'] = None
context_dict['pages'] = None
# Search functionality
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
context_dict['result_list'] = run_query(query)
return render(request, 'rango/category.html', context_dict)
@login_required
def add_category(request):
form = CategoryForm()
if request.method == 'POST':
form = CategoryForm(request.POST)
if form.is_valid():
# if the form is valid commit and direct user to index
form.save(commit=True)
return index(request)
else:
print(form.errors)
return render(request, 'rango/add_category.html', {'form': form})
@login_required
def add_page(request, category_name_slug):
try:
category = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
category = None
form = PageForm()
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if category:
page = form.save(commit=False)
page.category = category
page.save()
return redirect(reverse('rango:show_category', kwargs={'category_name_slug': category_name_slug}))
else:
print(form.errors)
return render(request, 'rango/add_page.html', {'form': form, 'category': category})
# Old, not used
def register(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
profile.save()
registered = True
else:
user_form = UserForm()
profile_form = UserProfileForm()
return render(request, 'rango/register.html', {'user_form': user_form, 'profile_form': profile_form, 'registered': registered})
# Old, not used
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse('rango:index'))
else:
return HttpResponse('Your account is disabled.')
else:
print(f'Invalid login details {username}, {password}')
return render(request, 'rango/login.html', {'error': 'Invalid login details'})
else:
return render(request, 'rango/login.html', {})
@login_required
def restricted(request):
return HttpResponse("Since you're logged in, you can see this text!")
# Old, not used
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def search(request):
result_list = []
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
result_list = run_query(query)
return render(request, 'rango/search.html', {'result_list': result_list})
def track_url(request):
page_id = None
if request.method == 'GET':
if 'page_id' in request.GET:
page_id = request.GET['page_id']
if page_id:
try:
page = Page.objects.get(id=page_id)
page.views += 1
page.save()
return redirect(page.url)
except Exception as e:
return HttpResponse(f'Page id {page_id} not found. Error code: {e}')
print('No page id in get string')
return redirect(reverse('index'))
@login_required
def register_profile(request):
form = UserProfileForm()
if request.method == 'POST':
form = UserProfileForm(request.POST, request.FILES)
if form.is_valid():
user_profile = form.save(commit=False)
user_profile.user = request.user
user_profile.save()
return redirect(reverse('index'))
else:
print(form.errors)
return render(request, 'registration/profile_registration.html', {'form': form})
def profile(request, username):
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return redirect(reverse('index'))
userprofile = UserProfile.objects.get_or_create(user=user)[0]
form = UserProfileForm({'website': userprofile.website, 'picture': userprofile.picture})
if request.user.username == user.username:
if request.method == "POST":
form = UserProfileForm(request.POST, request.FILES, instance=userprofile)
if form.is_valid():
form.save(commit=True)
return redirect(reverse('rango:profile', kwargs={'username': user.username}))
else:
print(form.errors)
return render(request, 'rango/profile.html', {'userprofile': userprofile, 'selecteduser': user, 'form': form})
def list_profile(request):
userprofile_list = UserProfile.objects.all()
return render(request, 'rango/list_profiles.html', {'userprofile_list': userprofile_list})
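# --- Illustrative only: one possible urls.py wiring for the views above. ---
# The route names are assumed from the reverse() calls in this file; the
# project's actual urls.py is not shown here.
#
# from django.urls import path
# from rango import views
#
# app_name = 'rango'
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('category/<slug:category_name_slug>/', views.show_category, name='show_category'),
#     path('goto/', views.track_url, name='goto'),
#     path('profile/<username>/', views.profile, name='profile'),
# ]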
``` |
{
"source": "jmjava/dev-example-flights",
"score": 3
} |
#### File: api/python/airlines.py
```python
import sys
import simplejson as json
import mariadb
import os
import flask
from flask import request
from flask import Blueprint
from dotenv import load_dotenv
load_dotenv()
airlines = Blueprint('airlines', __name__)
config = {
'host': os.getenv("DB_HOST"),
'port': int(os.getenv("DB_PORT")),
'user': os.getenv("DB_USER"),
'password': os.getenv("DB_PASS"),
'database': os.getenv("DB_NAME"),
'ssl_ca': os.getenv("SSL_CA")
}
@airlines.route('/api/airlines', methods=['GET'])
def index():
conn = mariadb.connect(**config)
cur = conn.cursor()
cur.execute("select * from airlines order by airline")
row_headers=[x[0] for x in cur.description]
rv = cur.fetchall()
json_data=[]
for result in rv:
json_data.append(dict(zip(row_headers,result)))
return json.dumps(json_data)
```
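The route above converts rows to JSON by zipping the `cursor.description` headers with each fetched tuple. The same pattern, factored into a reusable helper, is sketched below; `fetch_all_as_dicts` is a hypothetical name, not part of this repo.
```python
# Generic version of the rows-to-JSON pattern in airlines.py above.
import simplejson as json

def fetch_all_as_dicts(cursor):
    """Zip column names from cursor.description with every fetched row."""
    headers = [col[0] for col in cursor.description]
    return [dict(zip(headers, row)) for row in cursor.fetchall()]

# inside a route, after cur.execute(...):
#     return json.dumps(fetch_all_as_dicts(cur))
```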
#### File: api/python/api.py
```python
import flask
from airlines import airlines
from airports import airports
from flights import flights
app = flask.Flask(__name__)
app.config["DEBUG"] = True
app.register_blueprint(airlines)
app.register_blueprint(airports)
app.register_blueprint(flights)
@app.route("/api/version")
def version():
return "1.0"
app.run(port=8080)
``` |
{
"source": "jmjjg/jinja2schema",
"score": 2
} |
#### File: tests/unit_tests/test_filter_visitor.py
```python
import pytest
from jinja2 import nodes
from jinja2schema import config, parse, UnexpectedExpression, InvalidExpression
from jinja2schema.visitors.expr import visit_filter, Context
from jinja2schema.model import Dictionary, Scalar, List, Unknown, String, Number
def get_scalar_context(ast):
return Context(return_struct_cls=Scalar, predicted_struct=Scalar.from_ast(ast))
def test_string_filters():
for filter in ('capitalize', 'lower', 'striptags', 'title', 'upper', 'urlize'):
template = '{{ x|' + filter + ' }}'
ast = parse(template).find(nodes.Filter)
ctx = Context(return_struct_cls=Scalar, predicted_struct=Scalar.from_ast(ast))
rtype, struct = visit_filter(ast, ctx)
expected_rtype = String(label='x', linenos=[1])
expected_struct = Dictionary({
'x': String(label='x', linenos=[1]),
})
assert rtype == expected_rtype
assert struct == expected_struct
def test_batch_and_slice_filters():
for filter in ('batch', 'slice'):
template = '{{ items|' + filter + '(3, " ") }}'
ast = parse(template).find(nodes.Filter)
unknown_ctx = Context(predicted_struct=Unknown.from_ast(ast))
rtype, struct = visit_filter(ast, unknown_ctx)
expected_rtype = List(List(Unknown(), linenos=[1]), linenos=[1])
assert rtype == expected_rtype
expected_struct = Dictionary({
'items': List(Unknown(), label='items', linenos=[1]),
})
assert struct == expected_struct
scalar_ctx = Context(predicted_struct=Scalar.from_ast(ast))
with pytest.raises(UnexpectedExpression) as e:
visit_filter(ast, scalar_ctx)
assert str(e.value) == ('conflict on the line 1\n'
'got: AST node jinja2.nodes.Filter of structure [[<unknown>]]\n'
'expected structure: <scalar>')
def test_default_filter():
for filter in ('d', 'default'):
template = '''{{ x|''' + filter + '''('g') }}'''
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'x': String(label='x', linenos=[1], used_with_default=True, value='g'),
})
assert struct == expected_struct
def test_filter_chaining():
template = '''{{ (xs|first|last).gsom|sort|length }}'''
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'xs': List(List(Dictionary({
'gsom': List(Unknown(), label='gsom', linenos=[1]),
}, linenos=[1]), linenos=[1]), label='xs', linenos=[1]),
})
assert struct == expected_struct
template = '''{{ x|list|sort|first }}'''
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'x': Scalar(label='x', linenos=[1]),
})
assert struct == expected_struct
template = '''{{ x|first|list }}'''
ast = parse(template).find(nodes.Filter)
with pytest.raises(UnexpectedExpression) as e:
visit_filter(ast, get_scalar_context(ast))
expected = "conflict on the line 1\n\
got: AST node jinja2.nodes.Filter of structure [<scalar>]\n\
expected structure: <scalar>"
assert expected == str(e.value)
def test_raise_on_unknown_filter():
template = '''{{ x|unknownfilter }}'''
ast = parse(template).find(nodes.Filter)
with pytest.raises(InvalidExpression) as e:
visit_filter(ast, get_scalar_context(ast))
assert 'line 1: unknown filter "unknownfilter"' == str(e.value)
template = '''{{ x|attr('attr') }}'''
ast = parse(template).find(nodes.Filter)
with pytest.raises(InvalidExpression) as e:
visit_filter(ast, get_scalar_context(ast))
assert 'line 1: "attr" filter is not supported' == str(e.value)
def test_abs_filter():
ast = parse('{{ x|abs }}').find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == Number(label='x', linenos=[1])
assert struct == Dictionary({
'x': Number(label='x', linenos=[1])
})
def test_int_filter():
ast = parse('{{ x|int }}').find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == Number(label='x', linenos=[1])
assert struct == Dictionary({
'x': Scalar(label='x', linenos=[1]),
})
def test_wordcount_filter():
ast = parse('{{ x|wordcount }}').find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == Number(label='x', linenos=[1])
assert struct == Dictionary({
'x': String(label='x', linenos=[1])
})
def test_join_filter():
ast = parse('{{ xs|join(separator|default("|")) }}').find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == String(label='xs', linenos=[1])
assert struct == Dictionary({
'xs': List(String(), label='xs', linenos=[1]),
'separator': String(label='separator', linenos=[1], used_with_default=True, value='|'),
})
def test_length_filter():
for filter in ('count', 'length'):
template = '{{ xs|' + filter + ' }}'
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == Number(label='xs', linenos=[1])
assert struct == Dictionary({
'xs': List(Unknown(), label='xs', linenos=[1]),
})
def test_max_min_filter():
for filter in ('max', 'min'):
template = '{{ values|' + filter + ' }}'
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == Scalar(label='values', linenos=[1])
assert struct == Dictionary({
'values': List(Scalar(linenos=[1]), label='values', linenos=[1]),
})
def test_unique_filter():
template = '{{ values|unique }}'
ast = parse(template).find(nodes.Filter)
unknown_ctx = Context(predicted_struct=Unknown.from_ast(ast))
rtype, struct = visit_filter(ast, unknown_ctx)
assert rtype == Unknown(label='values', linenos=[1])
assert struct == Dictionary({
'values': List(Unknown(), label='values', linenos=[1]),
})
def test_reverse_filter():
template = '{{ x|reverse }}'
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == Unknown(label='x', linenos=[1])
expected_struct = Dictionary({
'x': Unknown(label='x', linenos=[1]),
})
assert struct == expected_struct
def test_tojson_filter():
template = '{{ x|tojson }}'
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == String(label='x', linenos=[1])
expected_struct = Dictionary({
'x': Unknown(label='x', linenos=[1]),
})
assert struct == expected_struct
def test_filesizeformat_filter():
template = '{{ x|filesizeformat }}'
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == String(label='x', linenos=[1])
expected_struct = Dictionary({
'x': Number(label='x', linenos=[1]),
})
assert struct == expected_struct
def test_string_filter():
template = '{{ x|string }}'
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == String(label='x', linenos=[1])
expected_struct = Dictionary({
'x': Scalar(label='x', linenos=[1]),
})
assert struct == expected_struct
def test_sum_filter():
template = '{{ x|sum }}'
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == Scalar(label='x', linenos=[1])
expected_struct = Dictionary({
'x': List(Scalar(), label='x', linenos=[1]),
})
assert struct == expected_struct
def test_pprint_filter():
template = '{{ x|pprint }}'
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast))
assert rtype == Scalar(label='x', linenos=[1])
expected_struct = Dictionary({
'x': Scalar(label='x', linenos=[1]),
})
assert struct == expected_struct
def test_ignore_all_unknown_filter():
template = '{{ x|foo|bar|baz }}'
cfg = config.default_config
cfg.IGNORE_UNKNOWN_FILTERS = True
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast), None, cfg)
assert rtype == Unknown(label='x', linenos=[1])
expected_struct = Dictionary({
'x': Unknown(label='x', linenos=[1]),
})
assert struct == expected_struct
def test_ignore_some_unknown_filter():
cfg = config.default_config
cfg.IGNORE_UNKNOWN_FILTERS = ('foo', 'bar', 'baz')
# 1. Check that it works when all the filter names are given
template = '{{ x|foo|bar|baz }}'
ast = parse(template).find(nodes.Filter)
rtype, struct = visit_filter(ast, get_scalar_context(ast), None, cfg)
assert rtype == Unknown(label='x', linenos=[1])
expected_struct = Dictionary({
'x': Unknown(label='x', linenos=[1]),
})
assert struct == expected_struct
# 2. Check that an exception is raised for a filter whose name is not in the list
template = '{{ x|foo|bar|baz|boz }}'
ast = parse(template).find(nodes.Filter)
with pytest.raises(InvalidExpression) as e:
visit_filter(ast, get_scalar_context(ast), None, cfg)
assert 'line 1: unknown filter "boz"' == str(e.value)
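# For context: outside these unit tests, the same inference machinery is
# reachable through jinja2schema's top-level API (a sketch; exact output
# formatting may differ between versions):
#
#     from jinja2schema import infer
#     infer('{{ xs|first|last }}')  # -> Dictionary({'xs': List(List(...))})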
``` |
{
"source": "jmjuanes/server",
"score": 2
} |
#### File: server/src/utils.py
```python
import os
import mimetypes
import json
# Read a configuration file
def read_config(config_path):
full_path = os.path.join(os.path.dirname(__file__), config_path)
with open(os.path.normpath(full_path)) as f:
config_data = json.load(f)
return config_data
# Get the subdomain of the request host
def get_subdomain(hostname):
return ".".join(hostname.split(".")[:-2])
# Get the extname from a path
def get_extname(path):
return os.path.splitext(path)[1]
# Get the mimetype from a path
# Extracted from: https://stackoverflow.com/a/45459425
def get_mimetype(path):
return mimetypes.guess_type(path)[0] or "application/octet-stream"
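# Quick self-check with illustrative values when run directly:
if __name__ == "__main__":
    assert get_subdomain("api.example.com") == "api"
    assert get_extname("/var/www/index.html") == ".html"
    print(get_mimetype("photo.jpg"))  # image/jpeg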
``` |
{
"source": "JMJustasDBML/let-me-see-you",
"score": 3
} |
#### File: JMJustasDBML/let-me-see-you/main.py
```python
import cv2
import os, sys
from datetime import datetime
from time import strftime
# Camera 0 is the integrated web cam
camera_port = 0
ramp_frames = 30
PHOTOS_DIR = ".captures"
homedir = os.path.expanduser("~")
targetDir = os.path.join(homedir, PHOTOS_DIR)
fileName = os.path.join(targetDir, datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + ".png")
if not os.path.exists(targetDir):
os.makedirs(targetDir)
camera = cv2.VideoCapture(camera_port)
def get_image():
retval, im = camera.read()
return im
# Ramp the camera - these frames will be discarded and are only used to allow v4l2
# to adjust light levels, if necessary
for i in xrange(ramp_frames):
temp = get_image()
print("Taking image...")
# Take the actual image we want to keep
camera_capture = get_image()
cv2.imwrite(fileName, camera_capture)
camera.release()  # free the camera device
``` |
{
"source": "jmk008/Personal-Projects",
"score": 3
} |
#### File: Personal-Projects/Twitter Api/twitter.py
```python
from twython import Twython #Python Library for twitter
from twython import TwythonStreamer
import random
#Authentication keys
from auth import (
consumer_key,
comsumer_key_secret,
access_token,
access_token_secret
)
twitter = Twython(
consumer_key,
comsumer_key_secret,
access_token,
access_token_secret
)
#Sending a random tweet on Twitter
choices = ['New Fav Song: Whats up danger', 'Gooo Patriots!!', 'Crush Rams']
message = random.choice(choices)
twitter.update_status(status=message)
print("Tweeted: %s" % message)
#Uploading an image on Twitter
message = "Hello World - here's a picture!"
image = open('image.jpg', 'rb') #image path
response = twitter.upload_media(media=image) #getting the media id from twitter
media_id = [response['media_id']]
twitter.update_status(status=message, media_ids=media_id) #uploading the image
print("Tweeted: " + message)
class MyStreamer(TwythonStreamer):
def on_success(self, data):
if 'text' in data:
username = data['user']['screen_name']
tweet = data['text']
print("@{}:{}".format(username, tweet))
stream = MyStreamer(
consumer_key,
comsumer_key_secret,
access_token,
access_token_secret
)
#Tracking tweets with the key word "raspberry pi"
stream.statuses.filter(track='raspberry pi')
```
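The streamer above only defines `on_success`. A small hedged extension adding error handling is sketched below; the exact `on_error` signature can vary between Twython versions, and the common `(status_code, data)` form is assumed here.
```python
# Sketch: same streaming pattern as above, plus basic error handling.
from twython import TwythonStreamer

class RobustStreamer(TwythonStreamer):
    def on_success(self, data):
        if 'text' in data:
            print("@{}: {}".format(data['user']['screen_name'], data['text']))

    def on_error(self, status_code, data):
        # HTTP 420 signals rate limiting; drop the connection and back off
        print("Stream error:", status_code)
        self.disconnect()
```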
#### File: Personal-Projects/Upload weather data to weather underground/WU-upload.py
```python
import requests
from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
#Function to convert hPa to inches
def hpa_to_inches(pressure_in_hpa):
pressure_in_inches_of_m = pressure_in_hpa * 0.02953
return pressure_in_inches_of_m
#Function to convert rainfall in mm to inches
def mm_to_inches(rainfall_in_mm):
rainfall_in_inches = rainfall_in_mm * 0.0393701
return rainfall_in_inches
#Function to convert C to F since sensehat returns in C and underground
# weather station takes temp in F
def degc_to_degf(temperature_in_c):
temperature_in_f = (temperature_in_c * (9/5.0)) + 32
return temperature_in_f
#Convert km/h to m/h
def kmh_to_mph(speed_in_kmh):
speed_in_mph = speed_in_kmh * 0.621371
return speed_in_mph
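# Quick sanity checks for the conversion helpers above (pure arithmetic):
assert abs(degc_to_degf(0) - 32.0) < 1e-9
assert abs(degc_to_degf(100) - 212.0) < 1e-9
assert abs(hpa_to_inches(1013.25) - 29.92) < 0.01
assert abs(mm_to_inches(25.4) - 1.0) < 0.001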
# create a string to hold the first part of the URL
WUurl = "https://weatherstation.wunderground.com/weatherstation\
/updateweatherstation.php?"
WU_station_id = "KVAFAIRF147" # Replace XXXX with your PWS ID
WU_station_pwd = "<PASSWORD>" # Replace YYYY with your Password
WUcreds = "ID=" + WU_station_id + "&PASSWORD="+ WU_station_pwd
date_str = "&dateutc=now"
action_str = "&action=updateraw"
while True:
#weather parameters
humidity = sense.get_humidity()
ambient_temp = sense.get_temperature()
pressure = sense.get_pressure()
#ground_temp = 16.345
#wind_speed = 5.6129
#wind_gust = 12.9030
#wind_average = 180
#rainfall = 1.270
#formatting data to 2 decimal places
ambient_temp_str = "{0:.2f}".format(degc_to_degf(ambient_temp))
#ground_temp_str = "{0:.2f}".format(degc_to_degf(ground_temp))
humidity_str = "{0:.2f}".format(humidity)
pressure_str = "{0:.2f}".format(hpa_to_inches(pressure))
#wind_speed_mph_str = "{0:.2f}".format(kmh_to_mph(wind_speed))
#wind_gust_mph_str = "{0:.2f}".format(kmh_to_mph(wind_gust))
#wind_average_str = str(wind_average)
#rainfall_in_str = "{0:.2f}".format(mm_to_inches(rainfall))
#creating a request
r = requests.get(
WUurl +
WUcreds +
date_str +
"&humidity=" + humidity_str +
"&baromin=" + pressure_str +
#"&windspeedmph=" + wind_speed_mph_str +
#"&windgustmph=" + wind_gust_mph_str +
"&tempf=" + ambient_temp_str +
#"&rainin=" + rainfall_in_str +
#"&soiltempf=" + ground_temp_str +
#"&winddir=" + wind_average_str +
action_str)
#success if 200 code received
print("Received " + str(r.status_code) + " " + str(r.text))
sleep(300)
``` |
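The same request can be built more safely with the `params` argument of `requests.get`, which URL-encodes each value; a minimal sketch reusing the variables from the loop above (only the non-commented sensor fields are included).
```python
import requests

# One upload built from a params dict; requests handles the
# query-string encoding that the concatenation above does by hand.
payload = {
    "ID": WU_station_id,
    "PASSWORD": WU_station_pwd,
    "dateutc": "now",
    "humidity": humidity_str,
    "baromin": pressure_str,
    "tempf": ambient_temp_str,
    "action": "updateraw",
}
r = requests.get(
    "https://weatherstation.wunderground.com/weatherstation/updateweatherstation.php",
    params=payload,
)
print(r.status_code, r.text)
```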
{
"source": "jmk74871/Erfolgsberichte_QT",
"score": 2
} |
#### File: Erfolgsberichte_QT/login/mainwindow.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(881, 286)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab_login = QtWidgets.QWidget()
self.tab_login.setObjectName("tab_login")
self.formLayoutWidget = QtWidgets.QWidget(self.tab_login)
self.formLayoutWidget.setGeometry(QtCore.QRect(10, 10, 771, 96))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(self.formLayoutWidget)
self.label.setObjectName("label")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label)
self.login_name = QtWidgets.QLineEdit(self.formLayoutWidget)
self.login_name.setClearButtonEnabled(True)
self.login_name.setObjectName("login_name")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.login_name)
self.label_2 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.login_button = QtWidgets.QPushButton(self.formLayoutWidget)
self.login_button.setAutoDefault(False)
self.login_button.setObjectName("login_button")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.login_button)
self.login_pw = QtWidgets.QLineEdit(self.formLayoutWidget)
font = QtGui.QFont()
font.setFamily("MS Shell Dlg 2")
self.login_pw.setFont(font)
self.login_pw.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.login_pw.setInputMask("")
self.login_pw.setText("")
self.login_pw.setEchoMode(QtWidgets.QLineEdit.PasswordEchoOnEdit)
self.login_pw.setClearButtonEnabled(True)
self.login_pw.setObjectName("login_pw")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.login_pw)
self.login_feedback_label = QtWidgets.QLabel(self.formLayoutWidget)
self.login_feedback_label.setObjectName("login_feedback_label")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.login_feedback_label)
self.tabWidget.addTab(self.tab_login, "")
self.tabnew_user = QtWidgets.QWidget()
self.tabnew_user.setObjectName("tabnew_user")
self.formLayoutWidget_2 = QtWidgets.QWidget(self.tabnew_user)
self.formLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 841, 174))
self.formLayoutWidget_2.setObjectName("formLayoutWidget_2")
self.formLayout_2 = QtWidgets.QFormLayout(self.formLayoutWidget_2)
self.formLayout_2.setContentsMargins(0, 0, 0, 0)
self.formLayout_2.setObjectName("formLayout_2")
self.label_3 = QtWidgets.QLabel(self.formLayoutWidget_2)
self.label_3.setObjectName("label_3")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.label_4 = QtWidgets.QLabel(self.formLayoutWidget_2)
self.label_4.setObjectName("label_4")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.cu_name = QtWidgets.QLineEdit(self.formLayoutWidget_2)
self.cu_name.setCursorMoveStyle(QtCore.Qt.LogicalMoveStyle)
self.cu_name.setClearButtonEnabled(True)
self.cu_name.setObjectName("cu_name")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.cu_name)
self.cu_pw = QtWidgets.QLineEdit(self.formLayoutWidget_2)
self.cu_pw.setEchoMode(QtWidgets.QLineEdit.PasswordEchoOnEdit)
self.cu_pw.setClearButtonEnabled(True)
self.cu_pw.setObjectName("cu_pw")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.cu_pw)
self.label_6 = QtWidgets.QLabel(self.formLayoutWidget_2)
self.label_6.setObjectName("label_6")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_6)
self.cu_mail = QtWidgets.QLineEdit(self.formLayoutWidget_2)
self.cu_mail.setInputMethodHints(QtCore.Qt.ImhEmailCharactersOnly)
self.cu_mail.setClearButtonEnabled(True)
self.cu_mail.setObjectName("cu_mail")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.cu_mail)
self.cu_pw_rep = QtWidgets.QLineEdit(self.formLayoutWidget_2)
self.cu_pw_rep.setEchoMode(QtWidgets.QLineEdit.PasswordEchoOnEdit)
self.cu_pw_rep.setClearButtonEnabled(True)
self.cu_pw_rep.setObjectName("cu_pw_rep")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.cu_pw_rep)
self.label_5 = QtWidgets.QLabel(self.formLayoutWidget_2)
self.label_5.setObjectName("label_5")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.cu_button = QtWidgets.QPushButton(self.formLayoutWidget_2)
self.cu_button.setObjectName("cu_button")
self.formLayout_2.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.cu_button)
self.label_7 = QtWidgets.QLabel(self.formLayoutWidget_2)
self.label_7.setObjectName("label_7")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_7)
self.cu_mail2 = QtWidgets.QLineEdit(self.formLayoutWidget_2)
self.cu_mail2.setInputMethodHints(QtCore.Qt.ImhEmailCharactersOnly)
self.cu_mail2.setClearButtonEnabled(True)
self.cu_mail2.setObjectName("cu_mail2")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.cu_mail2)
self.cu_feedback_label = QtWidgets.QLabel(self.formLayoutWidget_2)
self.cu_feedback_label.setObjectName("cu_feedback_label")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.cu_feedback_label)
self.tabWidget.addTab(self.tabnew_user, "")
self.gridLayout.addWidget(self.tabWidget, 0, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 881, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.login_name, self.login_pw)
MainWindow.setTabOrder(self.login_pw, self.login_button)
MainWindow.setTabOrder(self.login_button, self.tabWidget)
MainWindow.setTabOrder(self.tabWidget, self.cu_name)
MainWindow.setTabOrder(self.cu_name, self.cu_mail)
MainWindow.setTabOrder(self.cu_mail, self.cu_mail2)
MainWindow.setTabOrder(self.cu_mail2, self.cu_pw)
MainWindow.setTabOrder(self.cu_pw, self.cu_pw_rep)
MainWindow.setTabOrder(self.cu_pw_rep, self.cu_button)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Benutzer"))
self.label_2.setText(_translate("MainWindow", "Passwort"))
self.login_button.setText(_translate("MainWindow", "Anmelden"))
self.login_feedback_label.setText(_translate("MainWindow", "TextLabel"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_login), _translate("MainWindow", "Anmeldung"))
self.label_3.setText(_translate("MainWindow", "Benutzer Name:"))
self.label_4.setText(_translate("MainWindow", "Passwort:"))
self.label_6.setText(_translate("MainWindow", "Mailadresse:"))
self.label_5.setText(_translate("MainWindow", "Passwor wiederholen:"))
self.cu_button.setText(_translate("MainWindow", "Anlegen"))
self.label_7.setText(_translate("MainWindow", "2. Mailadresse:"))
self.cu_feedback_label.setText(_translate("MainWindow", "TextLabel"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabnew_user), _translate("MainWindow", "Benutzer anlegen"))
``` |
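Generated `Ui_*` classes carry no event loop of their own; a minimal launcher sketch, assuming the import path `login.mainwindow` implied by the file header above.
```python
import sys
from PyQt5 import QtWidgets
from login.mainwindow import Ui_MainWindow  # path assumed from the file header

app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(window)  # builds the widgets onto the QMainWindow
window.show()
sys.exit(app.exec_())
```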
{
"source": "jmkacz/practice-advent-of-code",
"score": 3
} |
#### File: day03/python/test_part1.py
```python
from part1 import compute_answer
def test_compute_answer_sample_1():
lines = [">"]
expected = 2
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_sample_2():
lines = ["^>v<"]
expected = 4
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_sample_3():
lines = ["^v^v^v^v^v"]
expected = 2
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_full():
with open("../data/input.dat", "r") as infile:
lines = [line.strip() for line in infile.readlines()]
expected = 2592
actual = compute_answer(lines)
assert actual == expected
```
#### File: day07/python/part1.py
```python
from pprint import pprint
from typing import Dict, List, Union
class Circuit:
def __init__(self, wires):
self.wires: Dict[str, List[str]] = wires
def debug(self):
pprint(self.wires)
def get_value(self, instruction: List[str]) -> int:
compute_value = lambda x: int(x) if x.isdigit() else self.process(x)
if "AND" in instruction:
return compute_value(instruction[0]) & compute_value(instruction[2])
elif "OR" in instruction:
return compute_value(instruction[0]) | compute_value(instruction[2])
elif "LSHIFT" in instruction:
return compute_value(instruction[0]) << compute_value(instruction[2])
elif "RSHIFT" in instruction:
return compute_value(instruction[0]) >> compute_value(instruction[2])
elif "NOT" in instruction:
return ((2 << 15) - 1) ^ compute_value(instruction[1])
else:
return compute_value(instruction[0])
def process(self, wire: str) -> int:
result = self.get_value(self.wires[wire])
self.wires[wire] = [str(result)]
# self.debug()
return result
def parse(lines: List[str]) -> Dict[str, List[str]]:
result = {}
for line in lines:
instruction, wire = line.split(" -> ")
result[wire] = instruction.split(" ")
return result
def compute_answer(lines: List[str], wire: str) -> int:
circuit = Circuit(parse(lines))
return circuit.process(wire)
```
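Because `process()` overwrites each resolved wire with its literal value, repeated lookups are effectively memoized; a quick check against the documented sample:
```python
lines = [
    "123 -> x",
    "456 -> y",
    "x AND y -> d",
]
print(compute_answer(lines, "d"))  # 72, i.e. 123 & 456
```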
#### File: day07/python/test_part1.py
```python
import pytest
from part1 import compute_answer
SAMPLE = [
("d", 72),
("e", 507),
("f", 492),
("g", 114),
("h", 65412),
("i", 65079),
("x", 123),
("y", 456),
]
@pytest.mark.parametrize("wire,signal", SAMPLE)
def test_compute_answer_sample(wire, signal):
lines = [
"123 -> x",
"456 -> y",
"x AND y -> d",
"x OR y -> e",
"x LSHIFT 2 -> f",
"y RSHIFT 2 -> g",
"NOT x -> h",
"NOT y -> i",
]
expected = signal
actual = compute_answer(lines, wire)
assert actual == expected
def test_compute_answer_full():
with open("../data/input.dat", "r") as infile:
lines = [line.strip() for line in infile.readlines()]
wire = "a"
expected = 16076
actual = compute_answer(lines, wire)
assert actual == expected
```
#### File: day07/python/test_part2.py
```python
import pytest
from part2 import compute_answer
def test_compute_answer_full():
with open("../data/input.dat", "r") as infile:
lines = [line.strip() for line in infile.readlines()]
expected = 2797
actual = compute_answer(lines)
assert actual == expected
```
#### File: day09/python/test_part2.py
```python
from part2 import compute_answer
def test_compute_answer_sample():
lines = [
"London to Dublin = 464",
"London to Belfast = 518",
"Dublin to Belfast = 141",
]
expected = 982
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_full():
with open("../data/input.dat", "r") as infile:
lines = [line.strip() for line in infile.readlines()]
expected = 736
actual = compute_answer(lines)
assert actual == expected
```
#### File: day10/python/part1.py
```python
from typing import List
def play(nums: List[int]) -> List[int]:
result = []
run = 1
index = 1
while index < len(nums):
if nums[index] == nums[index - 1]:
run += 1
else:
result += [run, nums[index - 1]]
run = 1
index += 1
result += [run, nums[index - 1]]
return result
def compute_answer(lines: List[str]) -> int:
nums = [int(_) for _ in lines[0]]
for _ in range(40):
nums = play(nums)
return len(nums)
```
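`play` implements one round of the look-and-say sequence: "1" is read aloud as "one 1" giving "11", "11" as "two 1s" giving "21", and so on.
```python
seq = [1]
for _ in range(4):
    seq = play(seq)
    print("".join(str(d) for d in seq))
# 11
# 21
# 1211
# 111221
```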
#### File: day06/python/part2.py
```python
from typing import List
def parse(lines: List[str]) -> List[int]:
result = [0] * 9
for x in lines[0].split(","):
result[int(x)] += 1
return result
def compute_answer(lines: List[str], days: int) -> int:
timers = parse(lines)
for day in range(days):
# print(day, timers)
tmp = timers[0]
for i in range(1, len(timers)):
timers[i - 1] = timers[i]
timers[6] += tmp
timers[8] = tmp
# print(day + 1, timers)
return sum(timers)
```
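Since only the count of fish per timer value matters, the whole state is nine integers and the runtime is linear in the number of days; the AoC 2021 day 6 sample confirms it:
```python
sample = ["3,4,3,1,2"]
print(compute_answer(sample, 18))   # 26
print(compute_answer(sample, 80))   # 5934
print(compute_answer(sample, 256))  # 26984457539
```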
#### File: day09/python/part1.py
```python
from typing import List
def compute_answer(lines: List[str]) -> int:
result = 0
rows = len(lines)
cols = len(lines[0])
for r in range(rows):
for c in range(cols):
# up
if r > 0 and lines[r - 1][c] <= lines[r][c]:
continue
# down
if r < rows - 1 and lines[r + 1][c] <= lines[r][c]:
continue
# left
if c > 0 and lines[r][c - 1] <= lines[r][c]:
continue
# right
if c < cols - 1 and lines[r][c + 1] <= lines[r][c]:
continue
# print(r, c, lines[r][c])
result += int(lines[r][c]) + 1
return result
```
#### File: day09/python/part2.py
```python
import math
from typing import List, Tuple
def find_low_points(lines: List[str]) -> List[Tuple[int, int]]:
result = []
rows = len(lines)
cols = len(lines[0])
for r in range(rows):
for c in range(cols):
# up
if r > 0 and lines[r - 1][c] <= lines[r][c]:
continue
# down
if r < rows - 1 and lines[r + 1][c] <= lines[r][c]:
continue
# left
if c > 0 and lines[r][c - 1] <= lines[r][c]:
continue
# right
if c < cols - 1 and lines[r][c + 1] <= lines[r][c]:
continue
result.append((r, c))
return result
def compute_answer(lines: List[str]) -> int:
result = 0
basins: List[int] = []
rows = len(lines)
cols = len(lines[0])
visited = [[False] * cols for _ in range(rows)]
low_points = find_low_points(lines)
for low_point in low_points:
q = [low_point]
basin = 0
while q:
(r, c) = q.pop()
if r < 0 or r >= rows or c < 0 or c >= cols:
continue
if visited[r][c]:
continue
if lines[r][c] == "9":
continue
visited[r][c] = True
basin += 1
q.extend([(r, c - 1), (r, c + 1), (r - 1, c), (r + 1, c)])
basins = sorted(basins + [basin], reverse=True)[0:3]
result = math.prod(basins)
return result
```
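The flood fill stops at height-9 cells and at visited cells, so each cell is counted in at most one basin; on the AoC sample grid the three largest basins multiply to 1134.
```python
sample = [
    "2199943210",
    "3987894921",
    "9856789892",
    "8767896789",
    "9899965678",
]
print(compute_answer(sample))  # 1134 = 9 * 14 * 9
```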
#### File: day09/python/test_part1.py
```python
from part1 import compute_answer
def test_compute_answer_sample():
lines = [
"2199943210",
"3987894921",
"9856789892",
"8767896789",
"9899965678",
]
expected = 15
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_full():
with open("../data/input.dat", "r") as infile:
lines = [line.strip() for line in infile.readlines()]
expected = 512
actual = compute_answer(lines)
assert actual == expected
```
#### File: day10/python/test_part1.py
```python
from part1 import compute_answer, is_corrupted
def test_not_is_corrupted_1():
line = "()"
expected = False
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_not_is_corrupted_2():
line = "[]"
expected = False
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_not_is_corrupted_3():
line = "([])"
expected = False
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_not_is_corrupted_4():
line = "{()()()}"
expected = False
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_not_is_corrupted_5():
line = "<([{}])>"
expected = False
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_not_is_corrupted_6():
line = "[<>({}){}[([])<>]]"
expected = False
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_not_is_corrupted_7():
line = "(((((((((())))))))))"
expected = False
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_is_corrupted_1():
line = "(]"
expected = True
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_is_corrupted_2():
line = "{()()()>"
expected = True
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_is_corrupted_3():
line = "(((()))}"
expected = True
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_is_corrupted_4():
line = "<([]){()}[{}])"
expected = True
actual, _, _ = is_corrupted(line)
assert actual == expected
def test_compute_answer_sample():
lines = [
"[({(<(())[]>[[{[]{<()<>>",
"[(()[<>])]({[<{<<[]>>(",
"{([(<{}[<>[]}>{[]{[(<()>",
"(((({<>}<{<{<>}{[]{[]{}",
"[[<[([]))<([[{}[[()]]]",
"[{[{({}]{}}([{[{{{}}([]",
"{<[[]]>}<{[{[{[]{()[[[]",
"[<(<(<(<{}))><([]([]()",
"<{([([[(<>()){}]>(<<{{",
"<{([{{}}[<[[[<>{}]]]>[]]",
]
# {([(<{}[<>[]}>{[]{[(<()> - Expected ], but found } instead.
# [[<[([]))<([[{}[[()]]] - Expected ], but found ) instead.
# [{[{({}]{}}([{[{{{}}([] - Expected ), but found ] instead.
# [<(<(<(<{}))><([]([]() - Expected >, but found ) instead.
# <{([([[(<>()){}]>(<<{{ - Expected ], but found > instead.
expected = [
"[({(<(())[]>[[{[]{<()<>>",
"[(()[<>])]({[<{<<[]>>(",
"(((({<>}<{<{<>}{[]{[]{}",
"{<[[]]>}<{[{[{[]{()[[[]",
"<{([{{}}[<[[[<>{}]]]>[]]",
]
expected_score = 26397
actual, actual_score = compute_answer(lines)
assert actual == expected
assert actual_score == expected_score
def test_compute_answer_full():
with open("../data/input.dat", "r") as infile:
lines = [line.strip() for line in infile.readlines()]
expected_score = 266301
_, actual_score = compute_answer(lines)
assert actual_score == expected_score
```
#### File: day11/python/test_part1.py
```python
from part1 import compute_answer, parse, step
def test_step_1():
lines = parse(
[
"11111",
"19991",
"19191",
"19991",
"11111",
]
)
expected = parse(
[
"34543",
"40004",
"50005",
"40004",
"34543",
]
)
actual = step(lines)
assert actual == expected
def test_step_2():
lines = parse(
[
"34543",
"40004",
"50005",
"40004",
"34543",
]
)
expected = parse(
[
"45654",
"51115",
"61116",
"51115",
"45654",
]
)
actual = step(lines)
assert actual == expected
def test_compute_answer_sample():
lines = [
"5483143223",
"2745854711",
"5264556173",
"6141336146",
"6357385478",
"4167524645",
"2176841721",
"6882881134",
"4846848554",
"5283751526",
]
expected = 1656
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_full():
with open("../data/input.dat", "r") as infile:
lines = [line.strip() for line in infile.readlines()]
expected = 1640
actual = compute_answer(lines)
assert actual == expected
```
#### File: day12/python/part2.py
```python
from collections import defaultdict
from pprint import pprint
from typing import Dict, List, Tuple
def parse(lines: List[str]) -> Dict[str, List[str]]:
"""Convert the undirected graph into a directed graph."""
map = defaultdict(list)
for line in lines:
cave1, cave2 = line.split("-")
# You cannot enter the "start" node.
# You cannot leave the "end" node.
if cave2 != "start" and cave1 != "end":
map[cave1].append(cave2)
if cave1 != "start" and cave2 != "end":
map[cave2].append(cave1)
return map
def compute_answer(lines: List[str]) -> int:
map = parse(lines)
paths = []
queue: List[Tuple[List[str], bool]] = [(["start"], False)]
while queue:
# pprint(queue)
path, repeat = queue.pop()
last = path[-1]
if last == "end":
paths.append(path + ["end"])
continue
for cave in map[last]:
if cave.isupper() or cave not in path or not repeat:
queue.append(
(path + [cave], (repeat or (cave.islower() and cave in path)))
)
# pprint(paths)
return len(paths)
```
#### File: day15/python/test_part2.py
```python
from part2 import compute_answer
def test_compute_answer_sample():
lines = [
"1163751742",
"1381373672",
"2136511328",
"3694931569",
"7463417111",
"1319128137",
"1359912421",
"3125421639",
"1293138521",
"2311944581",
]
expected = 315
actual = compute_answer(lines)
assert actual == expected
def test_compute_answer_full():
with open("../data/input.dat", "r") as infile:
lines = [line.strip() for line in infile.readlines()]
expected = 2935
actual = compute_answer(lines)
assert actual == expected
```
#### File: day21/python/part1.py
```python
import re
from typing import Dict, Generator, List
def parse(lines: List[str]) -> Dict[int, int]:
result = {}
pattern = re.compile(r"Player (\d+) starting position: (\d+)")
for line in lines:
match = pattern.match(line)
if match is None:
raise Exception("Invalid line")
result[int(match.group(1)) - 1] = int(match.group(2))
return result
def generate_roll() -> Generator[int, None, None]:
roll = 0
while True:
yield roll + 1
roll = (roll + 1) % 100
def compute_answer(lines: List[str]) -> int:
die = generate_roll()
spaces = parse(lines)
scores = {k: 0 for k in spaces}
player = 0
rolls = 0
while all([score < 1000 for score in scores.values()]):
roll1 = next(die)
roll2 = next(die)
roll3 = next(die)
rolls += 3
spaces[player] = (spaces[player] + roll1 + roll2 + roll3 - 1) % 10 + 1
scores[player] += spaces[player]
# print(player, roll1, roll2, roll3, spaces[player], scores[player])
player = (player + 1) % len(spaces)
return scores[player] * rolls
```
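The deterministic die cycles 1..100 and the loop exits right after the winning turn, so `scores[player]` is the losing player's score; the sample gives 745 * 993 = 739785.
```python
sample = [
    "Player 1 starting position: 4",
    "Player 2 starting position: 8",
]
print(compute_answer(sample))  # 739785
```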
#### File: templates/python/part2.py
```python
from typing import List
def compute_answer(lines: List[str]) -> int:
raise NotImplementedError
``` |
{
"source": "JmKanmo/PetServiceComputing_Web",
"score": 3
} |
#### File: PetServiceComputing_Web/database/db_model.py
```python
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class DB_Model(db.Model):
__tablename__ = "sqlalchemy_DB"
id = db.Column(db.Integer, primary_key=True)
address = db.Column(db.String(100))
def __init__(self, id, address):
self.id = id
self.address = address
@property
def get_jsonAddress(self):
return{
'id': self.id,
'address': self.address
}
```
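A minimal sketch of wiring this model into a Flask app; the sqlite URI is an assumption for demonstration only.
```python
from flask import Flask
from database.db_model import db, DB_Model

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///demo.db"  # assumed URI
db.init_app(app)

with app.app_context():
    db.create_all()
    db.session.add(DB_Model(1, "Seoul"))
    db.session.commit()
    # get_jsonAddress is a property, so no parentheses
    print(DB_Model.query.get(1).get_jsonAddress)  # {'id': 1, 'address': 'Seoul'}
```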
#### File: site-packages/yattag/simpledoc.py
```python
__all__ = ['SimpleDoc']
import re
class DocError(Exception):
pass
class SimpleDoc(object):
"""
class generating xml/html documents using context managers
doc, tag, text = SimpleDoc().tagtext()
with tag('html'):
with tag('body', id = 'hello'):
with tag('h1'):
text('Hello world!')
print(doc.getvalue())
"""
class Tag(object):
def __init__(self, doc, name, attrs): # name is the tag name (ex: 'div')
self.doc = doc
self.name = name
self.attrs = attrs
def __enter__(self):
self.parent_tag = self.doc.current_tag
self.doc.current_tag = self
self.position = len(self.doc.result)
self.doc._append('')
def __exit__(self, tpe, value, traceback):
if value is None:
if self.attrs:
self.doc.result[self.position] = "<%s %s>" % (
self.name,
dict_to_attrs(self.attrs),
)
else:
self.doc.result[self.position] = "<%s>" % self.name
self.doc._append("</%s>" % self.name)
self.doc.current_tag = self.parent_tag
class DocumentRoot(object):
class DocumentRootError(DocError, AttributeError):
# Raising an AttributeError on __getattr__ instead of just a DocError makes it compatible
# with the pickle module (some users asked for pickling of SimpleDoc instances).
# I also keep the DocError from earlier versions to avoid possible compatibility issues
# with existing code.
pass
def __getattr__(self, item):
raise SimpleDoc.DocumentRoot.DocumentRootError("DocumentRoot here. You can't access anything here.")
_newline_rgx = re.compile(r'\r?\n')
def __init__(self, stag_end = ' />', nl2br = False):
r"""
stag_end:
the string terminating self closing tags.
This determines what tags produced using the `stag` method will look like.
For example, if you set `stag_end='>'`, then `doc.stag('hr')` will
produce a `<hr>` tag.
If you set `stag_end=' />'` (the default), then `doc.stag('hr')` would
instead produce a `<hr />` tag.
If you set `nl2br=True`, then the `text` method will also
produce `<br>` or `<br />` tags according to this preference when encountering
new lines.
Defaults to ' />'.
nl2br:
if set to True, the `text` method will turn new lines
('\n' or '\r\n' sequences) in the input to `<br />` html tags,
or possibly to `<br>` tags if using the `stag_end` parameter to this effect.
(see explanations about `stag_end` above).
Defaults to False (new lines are not replaced).
"""
self.result = []
self.current_tag = self.__class__.DocumentRoot()
self._append = self.result.append
assert stag_end in (' />', '/>', '>')
self._stag_end = stag_end
self._br = '<br' + stag_end
self._nl2br = nl2br
def tag(self, tag_name, *args, **kwargs):
"""
opens a HTML/XML tag for use inside a `with` statement
the tag is closed when leaving the `with` block
HTML/XML attributes can be supplied as keyword arguments,
or alternatively as (key, value) pairs.
The values of the keyword arguments should be strings.
They are escaped for use as HTML attributes
(the " character is replaced with ")
In order to supply a "class" html attributes, you must supply a `klass` keyword
argument. This is because `class` is a reserved python keyword so you can't use it
outside of a class definition.
Example::
with tag('h1', id = 'main-title'):
text("Hello world!")
# <h1 id="main-title">Hello world!</h1> was appended to the document
with tag('td',
('data-search', 'lemon'),
('data-order', '1384'),
id = '16'
):
text('<NAME>')
# you get: <td data-search="lemon" data-order="1384" id="16">Citrus Limon</td>
"""
return self.__class__.Tag(self, tag_name, _attributes(args, kwargs))
def text(self, *strgs):
r"""
appends 0 or more strings to the document
the strings are escaped for use as text in html documents, that is,
& becomes &amp;
< becomes &lt;
> becomes &gt;
Example::
username = 'Max'
text('Hello ', username, '!') # appends "Hello Max!" to the current node
text('16 > 4') # appends "16 &gt; 4" to the current node
New lines ('\n' or '\r\n' sequences) are left intact, unless you have set the
nl2br option to True when creating the SimpleDoc instance. Then they would be
replaced with `<br />` tags (or `<br>` tags if using the `stag_end` option
of the SimpleDoc constructor as shown in the example below).
Example::
>>> doc = SimpleDoc()
>>> doc.text('pistachio\nice cream')
>>> doc.getvalue()
'pistachio\nice cream'
>>> doc = SimpleDoc(nl2br=True)
>>> doc.text('pistachio\nice cream')
>>> doc.getvalue()
'pistachio<br />ice cream'
>>> doc = SimpleDoc(nl2br=True, stag_end='>')
>>> doc.text('pistachio\nice cream')
>>> doc.getvalue()
'pistachio<br>ice cream'
"""
for strg in strgs:
transformed_string = html_escape(strg)
if self._nl2br:
self._append(
self.__class__._newline_rgx.sub(
self._br,
transformed_string
)
)
else:
self._append(transformed_string)
def line(self, tag_name, text_content, *args, **kwargs):
"""
Shortcut to write tag nodes that contain only text.
For example, in order to obtain::
<h1>The 7 secrets of catchy titles</h1>
you would write::
line('h1', 'The 7 secrets of catchy titles')
which is just a shortcut for::
with tag('h1'):
text('The 7 secrets of catchy titles')
The first argument is the tag name, the second argument
is the text content of the node.
The optional arguments after that are interpreted as xml/html
attributes. in the same way as with the `tag` method.
Example::
line('a', 'Who are we?', href = '/about-us.html')
produces::
<a href="/about-us.html">Who are we?</a>
"""
with self.tag(tag_name, *args, **kwargs):
self.text(text_content)
def asis(self, *strgs):
"""
appends 0 or more strings to the documents
contrary to the `text` method, the strings are appended "as is"
&, < and > are NOT escaped
Example::
doc.asis('<!DOCTYPE html>') # appends <!DOCTYPE html> to the document
"""
for strg in strgs:
if strg is None:
raise TypeError("Expected a string, got None instead.")
# passing None by mistake was frequent enough to justify a check
# see https://github.com/leforestier/yattag/issues/20
self._append(strg)
def nl(self):
self._append('\n')
def attr(self, *args, **kwargs):
"""
sets HTML/XML attribute(s) on the current tag
HTML/XML attributes are supplied as (key, value) pairs of strings,
or as keyword arguments.
The values of the keyword arguments should be strings.
They are escaped for use as HTML attributes
(the " character is replaced with ")
Note that, instead, you can set html/xml attributes by passing them as
keyword arguments to the `tag` method.
In order to supply a "class" html attributes, you can either pass
a ('class', 'my_value') pair, or supply a `klass` keyword argument
(this is because `class` is a reserved python keyword so you can't use it
outside of a class definition).
Examples::
with tag('h1'):
text('Welcome!')
doc.attr(id = 'welcome-message', klass = 'main-title')
# you get: <h1 id="welcome-message" class="main-title">Welcome!</h1>
with tag('td'):
text('<NAME>')
doc.attr(
('data-search', 'lemon'),
('data-order', '1384')
)
# you get: <td data-search="lemon" data-order="1384">Citrus Limon</td>
"""
self.current_tag.attrs.update(_attributes(args, kwargs))
def data(self, *args, **kwargs):
"""
sets HTML/XML data attribute(s) on the current tag
HTML/XML data attributes are supplied as (key, value) pairs of strings,
or as keyword arguments.
The values of the keyword arguments should be strings.
They are escaped for use as HTML attributes
(the " character is replaced with ")
Note that, instead, you can set html/xml data attributes by passing them as
keyword arguments to the `tag` method.
Examples::
with tag('h1'):
text('Welcome!')
doc.data(msg='welcome-message')
# you get: <h1 data-msg="welcome-message">Welcome!</h1>
with tag('td'):
text('<NAME>')
doc.data(
('search', 'lemon'),
('order', '1384')
)
# you get: <td data-search="lemon" data-order="1384">Citrus Limon</td>
"""
self.attr(
*(('data-%s' % key, value) for (key, value) in args),
**dict(('data-%s' % key, value) for (key, value) in kwargs.items())
)
def stag(self, tag_name, *args, **kwargs):
"""
appends a self closing tag to the document
html/xml attributes can be supplied as keyword arguments,
or alternatively as (key, value) pairs.
The values of the keyword arguments should be strings.
They are escaped for use as HTML attributes
(the " character is replaced with ")
Example::
doc.stag('img', src = '/salmon-plays-piano.jpg')
# appends <img src="/salmon-plays-piano.jpg" /> to the document
If you want to produce self closing tags without the ending slash (HTML5 style),
use the stag_end parameter of the SimpleDoc constructor at the creation of the
SimpleDoc instance.
Example::
>>> doc = SimpleDoc(stag_end = '>')
>>> doc.stag('br')
>>> doc.getvalue()
'<br>'
"""
if args or kwargs:
self._append("<%s %s%s" % (
tag_name,
dict_to_attrs(_attributes(args, kwargs)),
self._stag_end
))
else:
self._append("<%s%s" % (tag_name, self._stag_end))
def cdata(self, strg, safe = False):
"""
appends a CDATA section containing the supplied string
You don't have to worry about potential ']]>' sequences that would terminate
the CDATA section. They are replaced with ']]]]><![CDATA[>'.
If you're sure your string does not contain ']]>', you can pass `safe = True`.
If you do that, your string won't be searched for ']]>' sequences.
"""
self._append('<![CDATA[')
if safe:
self._append(strg)
else:
self._append(strg.replace(']]>', ']]]]><![CDATA[>'))
self._append(']]>')
def getvalue(self):
"""
returns the whole document as a single string
"""
return ''.join(self.result)
def tagtext(self):
"""
return a triplet composed of::
. the document itself
. its tag method
. its text method
Example::
doc, tag, text = SimpleDoc().tagtext()
with tag('h1'):
text('Hello world!')
print(doc.getvalue()) # prints <h1>Hello world!</h1>
"""
return self, self.tag, self.text
def ttl(self):
"""
returns a quadruplet composed of::
. the document itself
. its tag method
. its text method
. its line method
Example::
doc, tag, text, line = SimpleDoc().ttl()
with tag('ul', id='grocery-list'):
line('li', 'Tomato sauce', klass="priority")
line('li', 'Salt')
line('li', 'Pepper')
print(doc.getvalue())
"""
return self, self.tag, self.text, self.line
def add_class(self, *classes):
"""
adds one or many elements to the html "class" attribute of the current tag
Example::
user_logged_in = False
with tag('a', href="/nuclear-device", klass = 'small'):
if not user_logged_in:
doc.add_class('restricted-area')
text("Our new product")
print(doc.getvalue())
# prints <a class="restricted-area small" href="/nuclear-device"></a>
"""
self._set_classes(
self._get_classes().union(classes)
)
def discard_class(self, *classes):
"""
remove one or many elements from the html "class" attribute of the current
tag if they are present (do nothing if they are absent)
"""
self._set_classes(
self._get_classes().difference(classes)
)
def toggle_class(self, elem, active):
"""
if active is a truthy value, ensure elem is present inside the html
"class" attribute of the current tag, otherwise (if active is falsy)
ensure elem is absent
"""
classes = self._get_classes()
if active:
classes.add(elem)
else:
classes.discard(elem)
self._set_classes(classes)
def _get_classes(self):
try:
current_classes = self.current_tag.attrs['class']
except KeyError:
return set()
else:
return set(current_classes.split())
def _set_classes(self, classes_set):
if classes_set:
self.current_tag.attrs['class'] = ' '.join(classes_set)
else:
try:
del self.current_tag.attrs['class']
except KeyError:
pass
def html_escape(s):
if isinstance(s,(int,float)):
return str(s)
try:
return s.replace("&", "&").replace("<", "<").replace(">", ">")
except AttributeError:
raise TypeError(
"You can only insert a string, an int or a float inside a xml/html text node. "
"Got %s (type %s) instead." % (repr(s), repr(type(s)))
)
def attr_escape(s):
if isinstance(s,(int,float)):
return str(s)
try:
return s.replace("&", "&").replace("<", "<").replace('"', """)
except AttributeError:
raise TypeError(
"xml/html attributes should be passed as strings, ints or floats. "
"Got %s (type %s) instead." % (repr(s), repr(type(s)))
)
ATTR_NO_VALUE = object()
def dict_to_attrs(dct):
return ' '.join(
(key if value is ATTR_NO_VALUE
else '%s="%s"' % (key, attr_escape(value)))
for key,value in dct.items()
)
def _attributes(args, kwargs):
lst = []
for arg in args:
if isinstance(arg, tuple):
lst.append(arg)
elif isinstance(arg, str):
lst.append((arg, ATTR_NO_VALUE))
else:
raise ValueError(
"Couldn't make a XML or HTML attribute/value pair out of %s."
% repr(arg)
)
result = dict(lst)
result.update(
(('class', value) if key == 'klass' else (key, value))
for key,value in kwargs.items()
)
return result
```
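Pulling the pieces together, a short usage sketch built only from the methods documented above:
```python
doc, tag, text = SimpleDoc().tagtext()
doc.asis('<!DOCTYPE html>')
with tag('html'):
    with tag('body', id='hello'):
        with tag('h1', klass='title'):  # klass renders as class="title"
            text('Hello world!')
        doc.stag('br')                  # default stag_end gives <br />
print(doc.getvalue())
# <!DOCTYPE html><html><body id="hello"><h1 class="title">Hello world!</h1><br /></body></html>
```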
#### File: PetServiceComputing_Web/rest_client/controller.py
```python
from flask import Blueprint
from flask import render_template
import requests
from flask import request
from rest_server.resource_Map import KakaoMap_Resource, Geocode_Resource
from rest_client.blue_print import BluePrint
from rest_server.resource_Animal import Animal_Resource, Location_Resource
import random
# Map search system section
@BluePrint.route('/maps', methods=['POST', 'GET'])
def map():
address = "음성군 대소면" # 초기주소
if request.method == 'POST':
address = request.form['address'] # 사용자가 입력한 주소값
pos = Geocode_Resource().get(address) # 주소값을 바탕으로 geocode정보 반환 및 저장
if pos == None:
pos = Geocode_Resource().get('음성군 대소면')
return render_template(
'map_template.html', pos_y=pos['idx_2'], pos_x=pos['idx_1'], nav_menu="map"
)
# Abandoned-animal information lookup section
animal_list = Animal_Resource().get_searchAnimal('', '', '', '', '', '')
# Requests abandoned-animal data based on the user's search conditions;
# the returned search_list is parsed and passed back to the user.
@BluePrint.route('/dashboard', methods=['POST', 'GET'])
def dashboard():
if request.method == 'POST':
start_day = request.form['start_day'].replace("-", "")
end_day = request.form['end_day'].replace("-", "")
animal_kinds = request.form['animal_kinds']
sido_kinds = '' if request.form['sido_kinds'] == '-선택-' else request.form['sido_kinds']
sigungu_kinds = '' if request.form['sigungu_kinds'] == '-선택-' else request.form['sigungu_kinds']
neutralization = request.form.getlist('neutralization')
search_list = Animal_Resource().get_searchAnimal(start_day, end_day, animal_kinds,
sido_kinds, sigungu_kinds, ''.join(neutralization))
list_param = []
if search_list != None:
if len(search_list) < 15:
for item in search_list:
list_param.append(item)
else:
for cnt in range(0, 15):
list_param.append(search_list[cnt])
return render_template(
'dashboard.html', nav_menu="dashboard", animal_list=list_param
)
else:
list_param = []
random.shuffle(animal_list)
if animal_list != None:
if len(animal_list) < 15:
for item in animal_list:
list_param.append(item)
else:
for cnt in range(0, 15):
list_param.append(animal_list[cnt])
return render_template(
'dashboard.html', nav_menu="dashboard", animal_list=list_param
)
# Renders the execution template
# (similar in content to the dashboard view above)
@BluePrint.route('/execute', methods=['POST', 'GET'])
def execute():
if request.method == 'POST':
start_day = request.form['start_day'].replace("-", "")
end_day = request.form['end_day'].replace("-", "")
animal_kinds = request.form['animal_kinds']
newtralization = request.form['newtralization_kinds']
geocode = Geocode_Resource().getFormattedAddress(
request.form['location'])
location = [] if geocode == None else geocode.split(' ')
sido_code = '' if len(
location) < 2 else Location_Resource().get_sidocode(location[1])
sigungu_code = '' if len(location) < 3 else Location_Resource().get_sigungucode(
sido_code, location[2])
search_list = Animal_Resource().get_searchAnimal(
start_day, end_day, animal_kinds, sido_code, sigungu_code, newtralization)
list_param = []
if search_list != None:
if len(search_list) < 15:
print(search_list)
for item in search_list:
list_param.append(item)
else:
if type(search_list).__name__ != 'list':
list_param.append([search_list])
else:
for cnt in range(0, 15):
list_param.append(search_list[cnt])
return render_template('execution_template.html', nav_menu="execute", animal_list=list_param)
return render_template('execution_template.html', nav_menu="execute", animal_list=[])
# Renders the guide template
@BluePrint.route('/guide')
def guide():
return render_template('guide_template.html', nav_menu="guide")
# Opens the animal detail popup
@BluePrint.route('/animal_info')
def animal_info():
return render_template(
'animal_info.html'
)
# Opens the shelter detail popup
@BluePrint.route('/shelter_info')
def shelter_info():
return render_template(
'shelter_info.html'
)
``` |
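These routes hang off `BluePrint`; a minimal app sketch, assuming `BluePrint` is a `flask.Blueprint` instance defined in `rest_client/blue_print.py`:
```python
from flask import Flask
from rest_client.blue_print import BluePrint
import rest_client.controller  # noqa: F401 (imported for its route side effects)

app = Flask(__name__)
app.register_blueprint(BluePrint)

if __name__ == "__main__":
    app.run(debug=True)
```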
{
"source": "jmkd3v/ro.py",
"score": 3
} |
#### File: ro.py/examples/filter_wall.py
```python
from ro_py import Client
import asyncio
group_id = 1
swear_words = ["cow"]
client = Client("COOKIE")
async def on_wall_post(post):
for word in swear_words:
if word in post.body.lower():
await post.delete()
async def main():
group = await client.get_group(group_id)
group.events.bind(on_wall_post, client.events.on_wall_post)
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
asyncio.get_event_loop().run_forever()
```
#### File: ro.py/examples/wall_commands.py
```python
from ro_py import Client
import asyncio
group_id = 2695946 # group id
auto_delete = False # Automatically delete the wall post when the command is executed
prefix = "!" # prefix for commands
allowed_roles = [255, 254] # roles allowed to use commands
client = Client("COOKIE")
async def on_wall_post(post):
print('new post from:', post.poster.name)
# Check if the post starts with prefix.
if post.body.startswith(prefix):
# Get the user that posted.
member = await client.group.get_member_by_id(post.poster.id)
# Check if the member is allowed to execute commands.
if member.role.rank in allowed_roles:
# set args and command variables.
args = post.body.split(" ")
command = args[0].replace(prefix, "")
# check if we need to delete the wall post
if auto_delete:
# delete the post
await post.delete()
# !promote <USERNAME>
# Promotes the user in the group.
if command == "promote":
target = await client.group.get_member_by_username(args[1])
old_role, new_role = await target.promote()
print(
f'[!] {target.name} ({target.id}) was promoted from {old_role.name} to {new_role.name} by {member.name} ({member.id})')
# <PREFIX>demote <USERNAME>
# Demotes a user in the group.
if command == "demote":
target = await client.group.get_member_by_username(args[1])
old_role, new_role = await target.demote()
print(
f'[!] {target.name} ({target.id}) was demoted from {old_role.name} to {new_role.name} by {member.name} ({member.id})')
# <PREFIX>setrank <USERNAME> <ROLE_NAME>
# Sets the rank of a user.
if command == "setrank":
target = await client.group.get_member_by_username(args[1])
roles = await client.group.get_roles()
for role in roles:
if role.name == args[2]:
await target.setrank(role.id)
# <PREFIX>shout <MESSAGE>
# shouts something to the group.
if command == "shout":
args.pop(0)
content = " ".join(args)
await client.group.update_shout(content)
async def main():
client.group = await client.get_group(group_id)
client.group.events.bind(on_wall_post, client.events.on_wall_post)
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
asyncio.get_event_loop().run_forever()
```
#### File: ro.py/ro_py/assets.py
```python
from ro_py.utilities.clientobject import ClientObject
from ro_py.utilities.errors import NotLimitedError
from ro_py.economy import LimitedResaleData
from ro_py.utilities.asset_type import AssetTypes
import iso8601
import asyncio
import copy
from ro_py.utilities.url import url
endpoint = url("api")
class Reseller:
def __init__(self, user, user_asset):
self.user = user
self.user_asset = user_asset
class Asset(ClientObject):
"""
Represents an asset.
Parameters
----------
cso : ro_py.utilities.clientobject.ClientSharedObject
Client shared object.
asset_id
ID of the asset.
"""
def __init__(self, cso, asset_id):
super().__init__()
self.id = asset_id
self.cso = cso
self.requests = cso.requests
self.events = Events(cso, self)
self.target_id = None
self.product_type = None
self.asset_id = None
self.product_id = None
self.name = None
self.description = None
self.asset_type_id = None
self.asset_type_name = None
self.creator = None
self.created = None
self.updated = None
self.price = None
self.is_new = None
self.is_for_sale = None
self.is_public_domain = None
self.is_limited = None
self.is_limited_unique = None
self.minimum_membership_level = None
self.content_rating_type_id = None
async def update(self):
"""
Updates the asset's information.
"""
asset_info_req = await self.requests.get(
url=endpoint + "marketplace/productinfo",
params={
"assetId": self.id
}
)
asset_info = asset_info_req.json()
self.target_id = asset_info["TargetId"]
self.product_type = asset_info["ProductType"]
self.asset_id = asset_info["AssetId"]
self.product_id = asset_info["ProductId"]
self.name = asset_info["Name"]
self.description = asset_info["Description"]
self.asset_type_id = asset_info["AssetTypeId"]
for key, value in AssetTypes._member_map_.items():
if value == self.asset_type_id:
self.asset_type_name = key
# if asset_info["Creator"]["CreatorType"] == "User":
# self.creator = User(self.requests, asset_info["Creator"]["Id"])
# if asset_info["Creator"]["CreatorType"] == "Group":
# self.creator = Group(self.requests, asset_info["Creator"]["CreatorTargetId"])
self.created = iso8601.parse_date(asset_info["Created"])
self.updated = iso8601.parse_date(asset_info["Updated"])
self.price = asset_info["PriceInRobux"]
self.is_new = asset_info["IsNew"]
self.is_for_sale = asset_info["IsForSale"]
self.is_public_domain = asset_info["IsPublicDomain"]
self.is_limited = asset_info["IsLimited"]
self.is_limited_unique = asset_info["IsLimitedUnique"]
self.minimum_membership_level = asset_info["MinimumMembershipLevel"]
self.content_rating_type_id = asset_info["ContentRatingTypeId"]
async def get_remaining(self):
"""
Gets the remaining amount of this asset. (used for Limited U items)
Returns
-------
int
"""
asset_info_req = await self.requests.get(
url=endpoint + "marketplace/productinfo",
params={
"assetId": self.asset_id
}
)
asset_info = asset_info_req.json()
return asset_info["Remaining"]
async def get_limited_resale_data(self):
"""
Gets the limited resale data
Returns
-------
LimitedResaleData
"""
if self.is_limited:
resale_data_req = await self.requests.get(
f"https://economy.roblox.com/v1/assets/{self.asset_id}/resale-data")
return LimitedResaleData(resale_data_req.json())
else:
raise NotLimitedError("You can only read this information on limited items.")
class UserAsset(Asset):
def __init__(self, cso, asset_id, user_asset_id):
# Asset.__init__ expects the client shared object, which also provides .requests
super().__init__(cso, asset_id)
self.user_asset_id = user_asset_id
async def get_resellers(self):
r = await self.requests.get(
url=f"https://economy.roblox.com/v1/assets/{self.id}/resellers?limit=10"
)
data = r.json()
resellers = []
for reseller_data in data['data']:
# client.get_user is a coroutine elsewhere in ro.py, so it must be awaited
seller = await self.cso.client.get_user(reseller_data['seller']['id'])
resellers.append(Reseller(seller, reseller_data))
return resellers
class Events:
def __init__(self, cso, asset):
self.cso = cso
self.asset = asset
def bind(self, func, event, delay=15):
if event == self.cso.client.events.on_asset_change:
return asyncio.create_task(self.on_asset_change(func, delay))
async def on_asset_change(self, func, delay):
await self.asset.update()
old_asset = copy.copy(self.asset)
while True:
await asyncio.sleep(delay)
await self.asset.update()
has_changed = False
for attr, value in old_asset.__dict__.items():
if getattr(self.asset, attr) != value:
has_changed = True
if has_changed:
if asyncio.iscoroutinefunction(func):
await func(old_asset, self.asset)
else:
func(old_asset, self.asset)
old_asset = copy.copy(self.asset)
```
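A hypothetical sketch of binding `on_asset_change`, modeled on the group examples earlier in this repo; `client.get_asset` is an assumption here, mirroring `client.get_user`/`client.get_group` from the examples above.
```python
import asyncio
from ro_py import Client

client = Client("COOKIE")

async def on_change(old_asset, new_asset):
    # Called with a snapshot of the asset before and after a detected change.
    if old_asset.price != new_asset.price:
        print(f"Price changed: {old_asset.price} -> {new_asset.price}")

async def main():
    asset = await client.get_asset(1)  # get_asset assumed to exist on Client
    asset.events.bind(on_change, client.events.on_asset_change, delay=30)

if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(main())
    asyncio.get_event_loop().run_forever()
```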
#### File: ro.py/ro_py/captcha.py
```python
class UnsolvedLoginCaptcha:
def __init__(self, data, pkey):
self.pkey = pkey
self.token = data["token"]
self.url = f"https://roblox-api.arkoselabs.com/fc/api/nojs/" \
f"?pkey={pkey}" \
f"&session={self.token.split('|')[0]}" \
f"&lang=en"
self.challenge_url = data["challenge_url"]
self.challenge_url_cdn = data["challenge_url_cdn"]
self.noscript = data["noscript"]
class UnsolvedCaptcha:
def __init__(self, pkey):
self.pkey = pkey
self.url = f"https://roblox-api.arkoselabs.com/fc/api/nojs/" \
f"?pkey={pkey}" \
f"&lang=en"
class CaptchaMetadata:
def __init__(self, data):
self.fun_captcha_public_keys = data["funCaptchaPublicKeys"]
```
#### File: ro.py/ro_py/economy.py
```python
from ro_py.utilities.url import url
endpoint = url("economy")
class Currency:
"""
Represents currency data.
"""
def __init__(self, currency_data):
self.robux = currency_data["robux"]
class LimitedResaleData:
"""
Represents the resale data of a limited item.
"""
def __init__(self, resale_data):
self.asset_stock = resale_data["assetStock"]
self.sales = resale_data["sales"]
self.number_remaining = resale_data["numberRemaining"]
self.recent_average_price = resale_data["recentAveragePrice"]
self.original_price = resale_data["originalPrice"]
```
#### File: ro.py/ro_py/groups.py
```python
import copy
from enum import Enum
import iso8601
import asyncio
from ro_py.wall import Wall
from ro_py.roles import Role
from ro_py.events import Event
from ro_py.users import BaseUser
from typing import Tuple, Callable
from ro_py.events import EventTypes
from ro_py.utilities.errors import NotFound
from ro_py.bases.baseuser import PartialUser
from ro_py.utilities.pages import Pages, SortOrder
from ro_py.utilities.clientobject import ClientObject
from ro_py.utilities.url import url
endpoint = url("groups")
class Shout:
"""
Represents a group shout.
"""
def __init__(self, cso, group, shout_data):
self.cso = cso
self.requests = cso.requests
self.group = group
self.data = shout_data
self.body = shout_data["body"]
self.created = iso8601.parse_date(shout_data["created"])
self.updated = iso8601.parse_date(shout_data["updated"])
self.poster = PartialUser(cso, shout_data["poster"])
def __str__(self):
return self.body
async def __call__(self, message):
"""
Updates the shout of the group.
Note that doing so discards this Shout object and returns a new one.
The parent group's shout parameter will also be updated accordingly.
Parameters
----------
message : str
Message that will overwrite the current shout of a group.
Returns
-------
ro_py.groups.Shout
"""
shout_req = await self.requests.patch(
url=endpoint + f"/v1/groups/{self.group.id}/status",
data={
"message": message
}
)
self.group.shout = Shout(self.cso, self.group, shout_req.json())
return self.group.shout
class JoinRequest:
def __init__(self, cso, data, group):
self.requests = cso.requests
self.group = group
self.requester = PartialUser(cso, data['requester'])
self.created = iso8601.parse_date(data['created'])
async def accept(self):
accept_req = await self.requests.post(
url=endpoint + f"/v1/groups/{self.group.id}/join-requests/users/{self.requester.id}"
)
return accept_req.status_code == 200
async def decline(self):
accept_req = await self.requests.delete(
url=endpoint + f"/v1/groups/{self.group.id}/join-requests/users/{self.requester.id}"
)
return accept_req.status_code == 200
class Actions(Enum):
delete_post = "deletePost"
remove_member = "removeMember"
accept_join_request = "acceptJoinRequest"
decline_join_request = "declineJoinRequest"
post_shout = "postShout"
change_rank = "changeRank"
buy_ad = "buyAd"
send_ally_request = "sendAllyRequest"
create_enemy = "createEnemy"
accept_ally_request = "acceptAllyRequest"
decline_ally_request = "declineAllyRequest"
delete_ally = "deleteAlly"
add_group_place = "addGroupPlace"
delete_group_place = "deleteGroupPlace"
create_items = "createItems"
configure_items = "configureItems"
spend_group_funds = "spendGroupFunds"
change_owner = "changeOwner"
delete = "delete"
adjust_currency_amounts = "adjustCurrencyAmounts"
abandon = "abandon"
claim = "claim"
Rename = "rename"
change_description = "changeDescription"
create_group_asset = "createGroupAsset"
upload_group_asset = "uploadGroupAsset"
configure_group_asset = "configureGroupAsset"
revert_group_asset = "revertGroupAsset"
create_group_developer_product = "createGroupDeveloperProduct"
configure_group_game = "configureGroupGame"
lock = "lock"
unlock = "unlock"
create_game_pass = "<PASSWORD>GamePass"
create_badge = "createBadge"
configure_badge = "configureBadge"
save_place = "savePlace"
publish_place = "publishPlace"
invite_to_clan = "inviteToClan"
kick_from_clan = "kickFromClan"
cancel_clan_invite = "cancelClanInvite"
buy_clan = "buyClan"
class Action:
def __init__(self, cso, data, group):
self.group = group
self.actor = Member(cso, data['actor']['user']['userId'], data['actor']['user']['username'], group, Role(cso, group, data['actor']['role']))
self.action = data['actionType']
self.created = iso8601.parse_date(data['created'])
self.data = data['description']
def action_handler(cso, data, args):
actions = []
for action in data:
actions.append(Action(cso, action, args))
return actions
def join_request_handler(cso, data, args):
join_requests = []
for request in data:
join_requests.append(JoinRequest(cso, request, args))
return join_requests
def member_handler(cso, data, args):
members = []
for member in data:
role = Role(cso, args, member['role'])
members.append(Member(cso, member['user']['userId'], member['user']['username'], args, role))
return members
class Group(ClientObject):
"""
Represents a group.
"""
def __init__(self, cso, group_id):
super().__init__()
self.cso = cso
"""Client Shared Object"""
self.requests = cso.requests
"Requests object."
self.id = group_id
"Group ID."
self.wall = Wall(self.cso, self)
"""Wall object."""
self.name = None
"""Group name."""
self.description = None
"""Group description."""
self.owner = None
"""Group owner."""
self.member_count = None
"""Group member count."""
self.is_builders_club_only = None
"""True if the group is Builders Club (Premium) only. This seems to have been removed."""
self.public_entry_allowed = None
"""Public entry allowed (private/public group)"""
self.shout = None
"""Current group shout (Shout)"""
self.events = Events(cso, self)
"""Events object."""
self.is_locked = False
"""True if this is a locked group."""
async def update(self):
"""
Updates the group's information.
"""
group_info_req = await self.requests.get(endpoint + f"/v1/groups/{self.id}")
group_info = group_info_req.json()
self.name = group_info["name"]
self.description = group_info["description"]
self.owner = await self.cso.client.get_user(group_info["owner"]["userId"])
self.member_count = group_info["memberCount"]
self.is_builders_club_only = group_info["isBuildersClubOnly"]
self.public_entry_allowed = group_info["publicEntryAllowed"]
if group_info.get('shout'):
self.shout = Shout(self.cso, self, group_info['shout'])
else:
self.shout = None
if "isLocked" in group_info:
self.is_locked = group_info["isLocked"]
async def update_shout(self, message):
"""
Updates the shout of the group.
DEPRECATED: Just call group.shout()
Parameters
----------
message : str
Message that will overwrite the current shout of a group.
Returns
-------
ro_py.groups.Shout
"""
return await self.shout(message)
async def get_roles(self):
"""
Gets all roles of the group.
Returns
-------
list
"""
role_req = await self.requests.get(
url=endpoint + f"/v1/groups/{self.id}/roles"
)
roles = []
for role in role_req.json()['roles']:
roles.append(Role(self.cso, self, role))
return roles
async def get_member_by_id(self, user_id):
# Get list of group user is in.
member_req = await self.requests.get(
url=endpoint + f"/v2/users/{user_id}/groups/roles"
)
data = member_req.json()
# Find group in list.
group_data = None
for group in data['data']:
if group['group']['id'] == self.id:
group_data = group
break
# Check if user is in group.
if not group_data:
raise NotFound(f"The user {user_id} was not found in group {self.id}")
# Create data to return.
role = Role(self.cso, self, group_data['role'])
member = Member(self.cso, user_id, "", self, role)
return member
async def get_member_by_username(self, name):
user = await self.cso.client.get_user_by_username(name)
member_req = await self.requests.get(
url=endpoint + f"/v2/users/{user.id}/groups/roles"
)
data = member_req.json()
# Find group in list.
group_data = None
for group in data['data']:
if group['group']['id'] == self.id:
group_data = group
break
# Check if user is in group.
if not group_data:
raise NotFound(f"The user {name} was not found in group {self.id}")
# Create data to return.
role = Role(self.cso, self, group_data['role'])
member = Member(self.cso, user.id, user.name, self, role)
return member
async def get_join_requests(self, sort_order=SortOrder.Ascending, limit=100):
pages = Pages(
cso=self.cso,
url=endpoint + f"/v1/groups/{self.id}/join-requests",
sort_order=sort_order,
limit=limit,
handler=join_request_handler,
handler_args=self
)
await pages.get_page()
return pages
async def get_members(self, sort_order=SortOrder.Ascending, limit=100):
pages = Pages(
cso=self.cso,
url=endpoint + f"/v1/groups/{self.id}/users?limit=100&sortOrder=Desc",
sort_order=sort_order,
limit=limit,
handler=member_handler,
handler_args=self
)
await pages.get_page()
return pages
async def get_audit_logs(self, action_filter: Actions = None, sort_order=SortOrder.Ascending, limit=100):
parameters = {}
if action_filter:
parameters['actionType'] = action_filter
pages = Pages(
cso=self.cso,
url=endpoint + f"/v1/groups/{self.id}/audit-log",
handler=action_handler,
extra_parameters=parameters,
handler_args=self,
limit=limit,
sort_order=sort_order
)
await pages.get_page()
return pages
class PartialGroup:
"""
Represents a group with less information.
Different information will be present here in different circumstances.
If it was generated as a game owner, it might only contain an ID and a name.
If it was generated from, let's say, groups/v2/users/userid/groups/roles, it'll also contain a member count.
"""
def __init__(self, cso, data):
self.cso = cso
self.requests = cso.requests
self.id = data["id"]
self.name = data["name"]
self.member_count = None
if "memberCount" in data:
self.member_count = data["memberCount"]
async def expand(self):
return await self.cso.client.get_group(self.id)
class Member(BaseUser):
"""
Represents a user in a group.
Parameters
----------
cso : ro_py.utilities.clientobject.ClientSharedObject
Client shared object used for API requests.
user_id : int
The id of a user.
name : str
The name of the user.
group : ro_py.groups.Group
The group the user is in.
role : ro_py.roles.Role
The role the user has in the group.
"""
def __init__(self, cso, user_id, name, group, role):
super().__init__(cso, user_id)
self.name = name
self.role = role
self.group = group
async def update_role(self):
"""
Updates the role information of the user.
Returns
-------
ro_py.roles.Role
"""
member_req = await self.requests.get(
url=endpoint + f"/v2/users/{self.id}/groups/roles"
)
data = member_req.json()
for role in data['data']:
if role['group']['id'] == self.group.id:
self.role = Role(self.cso, self.group, role['role'])
break
return self.role
async def change_rank(self, num) -> Tuple[Role, Role]:
"""
Changes the user's rank by the given offset.
If num is 1 the user's role will go up by 1.
If num is -1 the user's role will go down by 1.
Parameters
----------
num : int
How much to change the rank by.
"""
await self.update_role()
roles = await self.group.get_roles()
old_role = copy.copy(self.role)
role_counter = -1
for group_role in roles:
role_counter += 1
if group_role.rank == self.role.rank:
break
if not roles:
raise NotFound(f"User {self.id} is not in group {self.group.id}")
await self.setrank(roles[role_counter + num].id)
self.role = roles[role_counter + num]
return old_role, roles[role_counter + num]
async def promote(self):
"""
Promotes the user.
Returns
-------
Tuple[ro_py.roles.Role, ro_py.roles.Role]
"""
return await self.change_rank(1)
async def demote(self):
"""
Demotes the user.
Returns
-------
int
"""
return await self.change_rank(-1)
async def setrank(self, rank):
"""
Sets the user's role to the specified role using a rank id.
Parameters
----------
rank : int
Rank id
Returns
-------
bool
"""
rank_request = await self.requests.patch(
url=endpoint + f"/v1/groups/{self.group.id}/users/{self.id}",
data={
"roleId": rank
}
)
return rank_request.status_code == 200
async def setrole(self, role_num):
"""
        Sets the user's role to the specified role using a role number (1-255).
Parameters
----------
role_num : int
Role number (1-255)
Returns
-------
bool
"""
roles = await self.group.get_roles()
rank_role = None
for role in roles:
if role.rank == role_num:
rank_role = role
break
if not rank_role:
raise NotFound(f"Role {role_num} not found")
return await self.setrank(rank_role.id)
async def exile(self):
exile_req = await self.requests.delete(
url=endpoint + f"/v1/groups/{self.group.id}/users/{self.id}"
)
return exile_req.status_code == 200
class Events:
def __init__(self, cso, group):
self.cso = cso
self.group = group
def bind(self, func: Callable, event: EventTypes, delay: int = 15):
"""
Binds a function to an event.
Parameters
----------
func : function
Function that will be bound to the event.
event : ro_py.events.EventTypes
Event that will be bound to the function.
delay : int
How many seconds between each poll.
"""
        # Use elif: the first matching branch rebinds `event` to an Event
        # instance, so later comparisons against EventTypes must not run.
        if event == EventTypes.on_join_request:
            event = Event(self.on_join_request, EventTypes.on_join_request, (func, None), delay)
            self.cso.event_handler.add_event(event)
        elif event == EventTypes.on_wall_post:
            event = Event(self.on_wall_post, EventTypes.on_wall_post, (func, None), delay)
            self.cso.event_handler.add_event(event)
        elif event == EventTypes.on_group_change:
            event = Event(self.on_group_change, EventTypes.on_group_change, (func, None), delay)
            self.cso.event_handler.add_event(event)
asyncio.create_task(self.cso.event_handler.listen())
async def on_join_request(self, func: Callable, old_req, event: Event):
if not old_req:
current_group_reqs = await self.group.get_join_requests()
old_arguments = list(event.arguments)
old_arguments[1] = current_group_reqs.data[0].requester.id
return event.edit(arguments=tuple(old_arguments))
current_group_reqs = await self.group.get_join_requests()
current_group_reqs = current_group_reqs.data
if current_group_reqs[0].requester.id != old_req:
new_reqs = []
for request in current_group_reqs:
if request.requester.id == old_req:
break
new_reqs.append(request)
old_arguments = list(event.arguments)
old_arguments[1] = current_group_reqs[0].requester.id
event.edit(arguments=tuple(old_arguments))
for new_req in new_reqs:
asyncio.create_task(func(new_req))
async def on_wall_post(self, func: Callable, newest_wall_post, event: Event):
if not newest_wall_post:
current_wall_posts = await self.group.wall.get_posts(sort_order=SortOrder.Descending)
old_arguments = list(event.arguments)
old_arguments[1] = current_wall_posts.data[0].id
return event.edit(arguments=tuple(old_arguments))
current_wall_posts = await self.group.wall.get_posts(sort_order=SortOrder.Descending)
current_wall_posts = current_wall_posts.data
post = current_wall_posts[0]
if post.id != newest_wall_post:
new_posts = []
for post in current_wall_posts:
if post.id == newest_wall_post:
break
new_posts.append(post)
old_arguments = list(event.arguments)
old_arguments[1] = current_wall_posts[0].id
event.edit(arguments=tuple(old_arguments))
for new_post in new_posts:
asyncio.create_task(func(new_post))
async def on_group_change(self, func: Callable, current_group, event: Event):
if not current_group:
await self.group.update()
old_arguments = list(event.arguments)
old_arguments[1] = copy.copy(self.group)
return event.edit(arguments=tuple(old_arguments))
await self.group.update()
has_changed = False
        for attr, value in current_group.__dict__.items():
            other_value = getattr(self.group, attr)
            if attr == "shout":
                # Shout objects are compared by their string form.
                if str(value) != str(other_value):
                    has_changed = True
                continue
            if other_value != value:
                has_changed = True
if has_changed:
old_arguments = list(event.arguments)
old_arguments[1] = copy.copy(self.group)
event.edit(arguments=tuple(old_arguments))
asyncio.create_task(func(current_group, self.group))
"""
async def on_audit_log(self, func: Callable, delay: int):
audit_log = await self.group.get_audit_logs()
audit_log = audit_log.data[0]
while True:
await asyncio.sleep(delay)
new_audit = await self.group.get_audit_logs()
new_audits = []
for audit in new_audit.data:
if audit.created == audit_log.created:
print(audit.created, audit_log.created, audit.created == audit_log.created)
break
else:
print(audit.created, audit_log.created)
new_audits.append(audit)
if len(new_audits) > 0:
audit_log = new_audit.data[0]
for new in new_audits:
asyncio.create_task(func(new))
"""
```
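A minimal sketch of how these group APIs compose in practice (illustrative only: it assumes an already-authenticated ro_py `Client`, a placeholder group id, and uses `Pages.data`, `Member.name`, and `Member.role` as defined above):
```python
import asyncio
from ro_py import Client  # assumed import path; authentication is omitted here

async def demo(client: Client):
    group = await client.get_group(12345)           # placeholder group id
    members = await group.get_members(limit=100)    # Pages object (see get_members)
    for member in members.data:
        print(member.name, member.role.rank)        # each Member carries its Role
    join_requests = await group.get_join_requests()
    print(len(join_requests.data), "pending join requests")
```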
#### File: ro.py/ro_py/wall.py
```python
import iso8601
from typing import List
from ro_py.captcha import UnsolvedCaptcha
from ro_py.bases.baseuser import PartialUser
from ro_py.utilities.pages import Pages, SortOrder
from ro_py.utilities.url import url
endpoint = url("groups")
class WallPost:
"""
Represents a Roblox wall post.
"""
def __init__(self, cso, wall_data, group):
self.cso = cso
self.requests = cso.requests
self.group = group
self.id = wall_data['id']
self.body = wall_data['body']
self.created = iso8601.parse_date(wall_data['created'])
self.updated = iso8601.parse_date(wall_data['updated'])
if wall_data['poster']:
self.poster = PartialUser(self.cso, wall_data['poster']['user'])
else:
self.poster = None
async def delete(self):
wall_req = await self.requests.delete(
url=endpoint + f"/v1/groups/{self.group.id}/wall/posts/{self.id}"
)
return wall_req.status_code == 200
def wall_post_handler(cso, this_page, args) -> List[WallPost]:
    # The first argument is the shared client-state object (cso), which
    # WallPost expects; it was previously misleadingly named `requests`.
    wall_posts = []
    for wall_post in this_page:
        wall_posts.append(WallPost(cso, wall_post, args))
    return wall_posts
class Wall:
def __init__(self, cso, group):
self.cso = cso
self.requests = cso.requests
self.group = group
async def get_posts(self, sort_order=SortOrder.Ascending, limit=100):
wall_req = Pages(
cso=self.cso,
url=endpoint + f"/v2/groups/{self.group.id}/wall/posts",
sort_order=sort_order,
limit=limit,
handler=wall_post_handler,
handler_args=self.group
)
await wall_req.get_page()
return wall_req
async def post(self, content, captcha_key=None):
data = {
"body": content
}
if captcha_key:
data['captchaProvider'] = "PROVIDER_ARKOSE_LABS"
data['captchaToken'] = captcha_key
post_req = await self.requests.post(
url=endpoint + f"/v1/groups/2695946/wall/posts",
data=data,
quickreturn=True
)
if post_req.status_code == 403:
return UnsolvedCaptcha(pkey="<KEY>")
else:
return post_req.status_code == 200
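# Usage sketch (assumes an existing ro_py.groups.Group whose `wall` attribute
# is an instance of this class, and that PartialUser exposes a `name`):
#   posts = await group.wall.get_posts(sort_order=SortOrder.Descending)
#   for post in posts.data:
#       print(post.poster.name if post.poster else "<deleted>", post.body)
#   result = await group.wall.post("hello")  # UnsolvedCaptcha on 403, else bool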
``` |
{
"source": "Jmkernes/LSTM-RNN-in-numpy",
"score": 3
} |
#### File: Jmkernes/LSTM-RNN-in-numpy/test_gradient_checking.py
```python
from gradient_checking import *
import unittest
import numpy as np
from layers import *
# class TestNumGrad(unittest.TestCase):
# def test_num_grad(self):
if False:
# Matrix multiplication
N = 13
D = 8
H = 11
A = np.random.randn(N,D)
B = np.random.randn(D,H)
dout = np.random.randn(N,H)
dA = dout.dot(B.T)
dB = A.T.dot(dout)
func = lambda A: A.dot(B)
numA = num_grad(func, A, dout, fivept=True)
func = lambda B: A.dot(B)
numB = num_grad(func, B, dout, fivept=True)
print('-'*40)
print("Test matrix multiplication A*B")
print("Derivative dA: ",rel_error(numA, dA))
print("Derivative dB: ",rel_error(numB, dB))
# self.assertAlmostEqual(numA, analyticA)
# self.assertAlmostEqual(numB, analyticB)
# vector dot product
A = np.random.randn(7)
B = np.random.randn(7)
dout = np.random.randn()
func = lambda A: A.dot(B)
numA = num_grad(func, A, dout)
func = lambda B: A.dot(B)
numB = num_grad(func, B, dout)
print("\nTest vector dot product A*B")
print("dA error: ",rel_error(numA,dout*B))
print("dB error: ",rel_error(numB,dout*A.T))
# print(dout*B, numA)
if True:
N, T, H, D = 4, 5, 6, 7
print('-'*40)
print("Test affine_backward")
x = np.random.randn(N,D)
W = np.random.randn(D,H)
b = np.random.randn(H)
dout = np.random.randn(N,H)
h, cache = affine_forward(x, W, b)
dx, dW, db = affine_backward(dout, cache)
funcx = lambda x: affine_forward(x, W, b)[0]
funcW = lambda W: affine_forward(x, W, b)[0]
funcb = lambda b: affine_forward(x, W, b)[0]
num_dx = num_grad(funcx, x, dout)
num_dW = num_grad(funcW, W, dout)
num_db = num_grad(funcb, b, dout)
print(f"Testing x=({N,D}), W=({D,H}), b=({H})")
print("dx error:", rel_error(num_dx, dx))
print("dW error:", rel_error(num_dW, dW))
print("db error:", rel_error(num_db, db))
if True:
print('-'*40)
print("Test affine_all_backward")
N, T, H, V = 4, 5, 6, 7
x = np.random.randn(N,T,H)
W = np.random.randn(H,V)
b = np.random.randn(V)
dout = np.random.randn(N,T,V)
p, cache = affine_all_forward(x, W, b)
dx, dW, db = affine_all_backward(dout, cache)
funcx = lambda x: affine_all_forward(x, W, b)[0]
funcW = lambda W: affine_all_forward(x, W, b)[0]
funcb = lambda b: affine_all_forward(x, W, b)[0]
num_dx = num_grad(funcx, x, dout)
num_dW = num_grad(funcW, W, dout)
num_db = num_grad(funcb, b, dout)
print(f"Testing x=({N,D}), W=({D,H}), b=({H})")
print("dx error:", rel_error(num_dx, dx))
print("dW error:", rel_error(num_dW, dW))
print("db error:", rel_error(num_db, db))
##############################################
if False:
N, D, H = 13, 8, 11
print('-'*40)
print("Test vanilla_backward")
x = np.random.randn(N,D)
Wx = np.random.randn(D,H)
Wh = np.random.randn(H,H)
h_prev = np.random.randn(N,H)
b = np.random.randn(H)
dout = np.random.randn(N,H)
h, cache = vanilla_forward(x, Wx, b, h_prev, Wh)
dx, dWx, db, dh_prev, dWh = vanilla_backward(dout, cache)
funcx = lambda x: vanilla_forward(x, Wx, b, h_prev, Wh)[0]
funcWx = lambda Wx: vanilla_forward(x, Wx, b, h_prev, Wh)[0]
funcb = lambda b: vanilla_forward(x, Wx, b, h_prev, Wh)[0]
funch = lambda h_prev: vanilla_forward(x, Wx, b, h_prev, Wh)[0]
funcWh = lambda Wh: vanilla_forward(x, Wx, b, h_prev, Wh)[0]
num_dx = num_grad(funcx, x, dout)
num_dWx = num_grad(funcWx, Wx, dout)
num_db = num_grad(funcb, b, dout)
num_dh = num_grad(funch, h_prev, dout)
num_dWh = num_grad(funcWh, Wh, dout)
print(f"Testing x=({N,D}), Wx=({D,H}), b=({H}), h_prev=({N,H}), Wh=({H,H})")
print("dx error:", rel_error(num_dx, dx))
print("dWx error:", rel_error(num_dWx, dWx))
print("db error:", rel_error(num_db, db))
print("dh_prev error:", rel_error(num_dh, dh_prev))
print("dWh error:", rel_error(num_dWh, dWh))
##############################################
if False:
print('-'*40)
print("Test lstm_backward. There are two outputs c and h, \\\
so the total gradient is the sum from each contribution")
N, D, H = 7,6,8
x = np.random.randn(N,D)
Wx = np.random.randn(D,4*H)
Wh = np.random.randn(H,4*H)
h_prev = np.random.randn(N,H)
c_prev = np.random.randn(N,H)
b = np.random.randn(4*H)
dc = np.random.randn(N,H)
dh = np.random.randn(N,H)
c, h, cache = lstm_forward(x, Wx, b, h_prev, Wh, c_prev)
dx, dWx, db, dh_prev, dWh, dc_prev = lstm_backward(dc, dh, cache)
funcx1 = lambda x: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[0]
funcx2 = lambda x: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[1]
funcWx1 = lambda Wx: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[0]
funcWx2 = lambda Wx: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[1]
funcb1 = lambda b: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[0]
funcb2 = lambda b: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[1]
funch1 = lambda h_prev: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[0]
funch2 = lambda h_prev: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[1]
funcWh1 = lambda Wh: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[0]
funcWh2 = lambda Wh: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[1]
funcc1 = lambda c: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[0]
funcc2 = lambda c: lstm_forward(x, Wx, b, h_prev, Wh, c_prev)[1]
num_dx = num_grad(funcx1, x, dc) + num_grad(funcx2, x, dh)
num_dWx = num_grad(funcWx1, Wx, dc) + num_grad(funcWx2, Wx, dh)
num_db = num_grad(funcb1, b, dc) + num_grad(funcb2, b, dh)
num_dh = num_grad(funch1, h_prev, dc) + num_grad(funch2, h_prev, dh)
num_dWh = num_grad(funcWh1, Wh, dc) + num_grad(funcWh2, Wh, dh)
num_dc = num_grad(funcc1, c_prev, dc) + num_grad(funcc2, c_prev, dh)
print(f"Testing x={N,D}, Wx={D,H}, b={H}, h_prev={N,H}, Wh={H,H}")
print("dx error:", rel_error(num_dx, dx))
print("dWx error:", rel_error(num_dWx, dWx))
print("db error:", rel_error(num_db, db))
print("dh_prev error:", rel_error(num_dh, dh_prev))
print("dWh error:", rel_error(num_dWh, dWh))
print("dc_prev error:", rel_error(num_dc, dc_prev))
##############################################
if False:
print('-'*40)
print("Test softmax_loss.")
N, D, H = 7,6,8
y = np.random.randint(D,size=N)
x = np.random.randn(N,D)
loss, dx = softmax_loss(x,y)
func = lambda x: softmax_loss(x,y)[0]
num_dx = num_grad(func, x, 1)
print(f"Testing x={N,D}, y = {N,}")
print(f"Loss should be ~{np.log(N)}, computed value:{loss}")
print(f"dx error: {rel_error(num_dx, dx)}")
##############################################
if False:
print('-'*40)
print("Test vanilla_all_backward")
N, T, D, H = 5,6,3,7
x = np.random.randn(N,T,D)
Wx = np.random.randn(D,H)*.01
Wh = np.random.randn(H,H)*.01
h_prev = np.random.randn(N,H)
b = np.random.randn(H)*.01
dh = np.random.randn(N,T,H)
h, cache = vanilla_all_forward(x, Wx, b, h_prev, Wh)
dx, dWx, db, dWh = vanilla_all_backward(dh, cache)
funcx = lambda x: vanilla_all_forward(x, Wx, b, h_prev, Wh)[0]
funcWx = lambda Wx: vanilla_all_forward(x, Wx, b, h_prev, Wh)[0]
funcb = lambda b: vanilla_all_forward(x, Wx, b, h_prev, Wh)[0]
funcWh = lambda Wh: vanilla_all_forward(x, Wx, b, h_prev, Wh)[0]
num_dx = num_grad(funcx, x, dh)
num_dWx = num_grad(funcWx, Wx, dh)
num_db = num_grad(funcb, b, dh)
num_dWh = num_grad(funcWh, Wh, dh)
# print(num_dWx.shape)
# print(num_dx.shape,dx.shape)
# print(f"Testing x=({N,D}), Wx=({D,H}), b=({H}), h_prev=({N,H}), Wh=({H,H})")
print("dx error:", rel_error(num_dx, dx))
print("dWx error:", rel_error(num_dWx, dWx))
print("db error:", rel_error(num_db, db))
print("dWh error:", rel_error(num_dWh, dWh))
##############################################
if False:
    # Note: there is only one backprop input (dh), in contrast to the
    # single-layer case, so we only care about the derivative w.r.t. the upstream dh.
print('-'*40)
print("Test lstm_all_backward.")
N, T, D, H = 7,4,6,8
x = np.random.randn(N,T,D)
Wx = np.random.randn(D,4*H)*.01
Wh = np.random.randn(H,4*H)*.01
h_prev = np.random.randn(N,H)
b = np.random.randn(4*H)*.01
dc = np.random.randn(N,T,H)
dh = np.random.randn(N,T,H)
h, cache = lstm_all_forward(x, Wx, b, h_prev, Wh)
dx, dWx, db, dWh = lstm_all_backward(dh, cache)
funcx2 = lambda x: lstm_all_forward(x, Wx, b, h_prev, Wh)[0]
funcWx2 = lambda Wx: lstm_all_forward(x, Wx, b, h_prev, Wh)[0]
funcb2 = lambda b: lstm_all_forward(x, Wx, b, h_prev, Wh)[0]
funcWh2 = lambda Wh: lstm_all_forward(x, Wx, b, h_prev, Wh)[0]
num_dx = num_grad(funcx2, x, dh)
num_dWx = num_grad(funcWx2, Wx, dh)
num_db = num_grad(funcb2, b, dh)
num_dWh = num_grad(funcWh2, Wh, dh)
# print(f"Testing x={N,D}, Wx={D,H}, b={H}, h_prev={N,H}, Wh={H,H}")
print("dx error:", rel_error(num_dx, dx))
print("dWx error:", rel_error(num_dWx, dWx))
print("db error:", rel_error(num_db, db))
print("dWh error:", rel_error(num_dWh, dWh))
if False:
print('-'*40)
print("Testing W_embed backward derivative")
N, T, D, V = 5, 10, 4, 9
inputs = np.random.randint(V, size =[N,T])
W_embed = np.random.randn(V,D)*.01
dout = np.random.randn(N,T,D)*.01
x, cache = embed_forward(inputs, W_embed)
dW_embed = embed_backward(dout, cache)
    func = lambda W: embed_forward(inputs, W)[0]
num_dW_embed = num_grad(func, W_embed, dout)
print("dW_embed error:", rel_error(num_dW_embed, dW_embed))
``` |
{
"source": "jmkinder1/code-samples",
"score": 4
} |
#### File: jmkinder1/code-samples/average.py
```python
import numpy as np
def running_average(x):
"""
    Return cumulative average of an array.
"""
y = np.zeros(len(x)) # new array to store result
current_sum = 0.0 # running sum of elements of x
for i in range(len(x)):
current_sum += x[i] # increment sum
y[i] = current_sum / (i + 1.0) # update running average
return y
```
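A quick check of `running_average` (a sketch; depends only on numpy):
```python
import numpy as np
from average import running_average

x = np.array([1.0, 2.0, 3.0, 4.0])
print(running_average(x))  # [1.   1.5  2.   2.5 ]
```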
#### File: jmkinder1/code-samples/measurements.py
```python
import numpy as np
def crow(pointA, pointB):
"""
Distance between points A and B "as the crow flies."
pointA = (x1, y1)
pointB = (x2, y2)
returns sqrt( (x2-x1)**2 + (y2-y1)**2 )
"""
interval = np.sqrt( (pointA[0] - pointB[0])**2 + \
(pointA[1] - pointB[1])**2 )
return interval
def taxicab(pointA, pointB):
"""
Distance between points A and B "as the cab drives."
pointA = (x1, y1)
pointB = (x2, y2)
returns |x2-x1| + |y2-y1|
"""
interval = abs(pointB[0] - pointA[0]) + \
abs(pointB[1] - pointA[1])
return interval
def distance(pointA, pointB=(0,0), metric='taxi'):
"""
Return distance between points A and B. If metric is 'taxi', use taxicab
metric. Otherwise, use Euclidean distance.
pointA = (x1, y1)
pointB = (x2, y2)
"""
if metric == 'taxi':
return taxicab(pointA, pointB)
else:
return crow(pointA, pointB)
```
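Example usage of the three metrics (a sketch, using a 3-4-5 right triangle):
```python
from measurements import crow, taxicab, distance

pointA, pointB = (0, 0), (3, 4)
print(crow(pointA, pointB))               # 5.0
print(taxicab(pointA, pointB))            # 7
print(distance(pointB))                   # taxicab from the origin: 7
print(distance(pointB, metric='crow'))    # any non-'taxi' metric: 5.0
```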
#### File: jmkinder1/code-samples/name_collision.py
```python
def name_collisions():
    x, y = 'E', 'E'                     # enclosing scope of display()
    def display():
        x = 'L'                         # local to display()
        print( "Inside display() ..." )
        print( "x= {}\ny= {}\nz= {}".format(x,y,z) )
    display()
    print( "Inside name_collisions() ..." )
    print( "x= {}\ny= {}\nz= {}".format(x,y,z) )
x, y, z = 'G', 'G', 'G'                 # global scope
name_collisions()
print( "Outside function ..." )
print( "x= {}\ny= {}\nz= {}".format(x,y,z) )
``` |
{
"source": "jmkktw26/stanCode_projects",
"score": 3
} |
#### File: stanCode_projects/break_out_game/breakoutgraphics.py
```python
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
from campy.graphics.gimage import GImage
import random
BRICK_SPACING = 5 # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40       # Width of a brick (in pixels).
BRICK_HEIGHT = 15 # Height of a brick (in pixels).
BRICK_ROWS = 10 # Number of rows of bricks.
BRICK_COLS = 10 # Number of columns of bricks.
BRICK_OFFSET = 50 # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10 # Radius of the ball (in pixels).
PADDLE_WIDTH = 75 # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15 # Height of the paddle (in pixels).
PADDLE_OFFSET = 50 # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7 # Initial vertical speed for the ball.
MAX_X_SPEED = 5 # Maximum initial horizontal speed for the ball.
class BreakoutGraphics:
def __init__(self, ball_radius=BALL_RADIUS, paddle_width=PADDLE_WIDTH,
paddle_height=PADDLE_HEIGHT, paddle_offset=PADDLE_OFFSET,
brick_rows=BRICK_ROWS, brick_cols=BRICK_COLS,
brick_width=BRICK_WIDTH, brick_height=BRICK_HEIGHT,
brick_offset=BRICK_OFFSET, brick_spacing=BRICK_SPACING,
title='Breakout'):
# Create a graphical window, with some extra space
window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
self.window = GWindow(width=window_width, height=window_height, title=title)
# Create a paddle
self.paddle = GRect(paddle_width, paddle_height)
self.paddle.filled = True
self.paddle.fill_color = 'black'
self.paddle.color = 'black'
self.window.add(self.paddle, (self.window.width-self.paddle.width)/2, (self.window.height-paddle_offset))
# Center a filled ball in the graphical window
self.ball = GOval(ball_radius*2, ball_radius*2)
self.ball.color = 'black'
self.ball.filled = True
self.ball.fill_color = 'black'
self.window.add(self.ball, (self.window.width - self.ball.width) / 2,
(self.window.height - self.ball.height) / 2)
self.ball_r = ball_radius
# Default initial velocity for the ball
self.__dx = 0
self.__dy = INITIAL_Y_SPEED
self.set_x_velocity()
# Initialize our mouse listeners
onmousemoved(self.change_position)
# Draw bricks
self.brick_width = brick_width
self.brick_height = brick_height
self.brick_spacing = brick_spacing
self.y_position = brick_offset
for i in range(brick_rows):
self.x_position = 0
for j in range(brick_cols):
                if i % 10 == 0 or i % 10 == 1:      # rows 0-1: red bricks
                    self.brick_color("red")
                elif i % 10 == 2 or i % 10 == 3:    # rows 2-3: orange bricks
                    self.brick_color("orange")
                elif i % 10 == 4 or i % 10 == 5:    # rows 4-5: yellow bricks
                    self.brick_color("yellow")
                elif i % 10 == 6 or i % 10 == 7:    # rows 6-7: green bricks
                    self.brick_color("green")
                elif i % 10 == 8 or i % 10 == 9:    # rows 8-9: blue bricks
                    self.brick_color("blue")
self.y_position = self.y_position + brick_height + brick_spacing
        # Three copies of the same image anchored at the top-right corner.
        self.img0 = GImage('2.jpg')
        self.img1 = GImage('2.jpg')
        self.img2 = GImage('2.jpg')
        self.window.add(self.img0, self.window.width - self.img1.width, 0)
        self.window.add(self.img1, self.window.width - self.img1.width * 2, 0)
        self.window.add(self.img2, self.window.width - self.img1.width * 3, 0)
def brick_color(self, color):
self.brick = GRect(self.brick_width, self.brick_height)
self.brick.filled = True
self.brick.fill_color = color
self.brick.color = "black"
self.window.add(self.brick, self.x_position, self.y_position)
self.x_position = self.x_position + self.brick_spacing + self.brick_width
self.object = 0
def change_position(self, m):
self.paddle.x = m.x - self.paddle.width / 2
self.paddle.y = self.window.height - PADDLE_OFFSET
if self.paddle.x + self.paddle.width >= self.window.width:
self.paddle.x = self.window.width - self.paddle.width
if self.paddle.x <= 0:
self.paddle.x = 0
def set_x_velocity(self):
self.__dx = random.randint(1, MAX_X_SPEED)
if random.random() > 0.5:
self.__dx = -self.__dx
return self.__dx
def reset_ball(self):
self.window.add(self.ball, (self.window.width - self.ball.width) / 2,
(self.window.height - self.ball.height) / 2)
def remove_ball(self):
self.window.remove(self.ball)
def get_vy(self):
return self.__dy
    def check_collision(self):
        # Probe the four corners of the ball's bounding box; return the first
        # object found there, or None if the ball touches nothing.
        for i in (0, self.ball.width):
            for j in (0, self.ball.height):
                self.object = self.window.get_object_at(self.ball.x + i, self.ball.y + j)
                if self.object is not None:
                    return self.object
def get_brick_num(self):
self.brick_n = BRICK_COLS * BRICK_ROWS
return self.brick_n
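# A minimal animation loop driving this class might look like the sketch below
# (assumptions: campy provides campy.gui.events.timer.pause and GObject.move;
# brick removal, paddle bounces, and lives are simplified away):
#   from campy.gui.events.timer import pause
#   graphics = BreakoutGraphics()
#   dx, dy = graphics.set_x_velocity(), graphics.get_vy()
#   while True:
#       graphics.ball.move(dx, dy)
#       if graphics.ball.x <= 0 or graphics.ball.x + graphics.ball.width >= graphics.window.width:
#           dx = -dx
#       if graphics.ball.y <= 0:
#           dy = -dy
#       if graphics.check_collision() is not None:
#           dy = -dy  # real code must also remove bricks / ignore the paddle
#       pause(1000 / 120)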
``` |
{
"source": "jmkl/psutilitymix",
"score": 3
} |
#### File: jmkl/psutilitymix/generatethumb.py
```python
from PIL import Image
import PIL
import base64
import json
import os
def crop_image(image):
width, height = image.size
if width == height:
return image
offset = int(abs(height-width)/2)
if width>height:
image = image.crop([offset,0,width-offset,height])
else:
image = image.crop([0,offset,width,height-offset])
return image
def createThumb():
    fixed_height = 120
    root = "J:\\texturelabs"
    thumbdir = "J:\\texturelabs\\thumb"
    thumbs = os.listdir(thumbdir)
    files = os.listdir(root)
    for name in files:
        is_file = os.path.isfile(os.path.join(root, name))
        needs_thumb = not os.path.isfile(os.path.join(thumbdir, name))
        if is_file and len(files) - 1 > len(thumbs) and needs_thumb:
            image = Image.open(os.path.join(root, name))
            image = crop_image(image)
            image = image.resize((fixed_height, fixed_height), PIL.Image.NEAREST)
            image.save(os.path.join(root, "thumb", name))
def generatebase64():
imagedata = list()
for image in os.listdir("J:/texturelabs/thumb/"):
with open(os.path.join("J:/texturelabs/thumb/",image), "rb") as image_file:
data = base64.b64encode(image_file.read())
my_dic = {'base64':(data).decode('utf-8'),'name':image,'ext':os.path.splitext(image)[1][1:]}
imagedata.append(my_dic)
with open("J:/texturelabs/thumb/data.json","w") as f:
json.dump(imagedata,f)
if __name__ == "__main__":
createThumb()
generatebase64()
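# Reading the generated data.json back (sketch):
#   with open("J:/texturelabs/thumb/data.json") as f:
#       for entry in json.load(f):
#           raw = base64.b64decode(entry['base64'])  # bytes of the thumbnail
#           # entry['name'] and entry['ext'] identify the source image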
``` |
{
"source": "jmknoble/json-indent",
"score": 3
} |
#### File: tests/json_indent/test_json_indent.py
```python
from __future__ import absolute_import
import argparse
import collections
import io
import os
import os.path
import sys
import tempfile
import unittest
import json_indent.json_indent as ji
import json_indent.pyversion as pv
DUMMY_KEY_1 = "DummyKey1"
DUMMY_KEY_2 = "DummyKey2"
DUMMY_VALUE_1 = "DummyValue1"
DUMMY_VALUE_2 = "DummyValue2"
DUMMY_DICT = {DUMMY_KEY_1: DUMMY_VALUE_1}
DUMMY_JSON_DATA_DICT = {DUMMY_KEY_2: DUMMY_VALUE_2, DUMMY_KEY_1: [DUMMY_VALUE_1]}
DUMMY_JSON_DATA_ORDERED_DICT = collections.OrderedDict()
DUMMY_JSON_DATA_ORDERED_DICT[DUMMY_KEY_2] = DUMMY_VALUE_2
DUMMY_JSON_DATA_ORDERED_DICT[DUMMY_KEY_1] = [DUMMY_VALUE_1]
DUMMY_JSON_TEXT_UNFORMATTED = """{{
"{key2}": "{value2}", "{key1}": ["{value1}"]
}}
""".format(
key1=DUMMY_KEY_1, key2=DUMMY_KEY_2, value1=DUMMY_VALUE_1, value2=DUMMY_VALUE_2
)
DUMMY_JSON_TEXT_FORMATTED = """{{
"{key2}": "{value2}",
"{key1}": [
"{value1}"
]
}}
""".format(
key1=DUMMY_KEY_1, key2=DUMMY_KEY_2, value1=DUMMY_VALUE_1, value2=DUMMY_VALUE_2
)
DUMMY_JSON_TEXT_SORTED = """{{
"{key1}": [
"{value1}"
],
"{key2}": "{value2}"
}}
""".format(
key1=DUMMY_KEY_1, key2=DUMMY_KEY_2, value1=DUMMY_VALUE_1, value2=DUMMY_VALUE_2
)
DUMMY_JSON_TEXT_COMPACT = '{{"{key2}":"{value2}","{key1}":["{value1}"]}}\n'.format(
key1=DUMMY_KEY_1, key2=DUMMY_KEY_2, value1=DUMMY_VALUE_1, value2=DUMMY_VALUE_2
)
PLAIN_SEPARATORS = (",", ": ")
COMPACT_SEPARATORS = (",", ":")
PLAIN_KWARGS = {"indent": 4, "separators": PLAIN_SEPARATORS}
INDENT_KWARGS = {"indent": " " * 4, "separators": PLAIN_SEPARATORS}
SORTED_KWARGS = {"sort_keys": True, "indent": 4, "separators": PLAIN_SEPARATORS}
COMPACT_KWARGS = {"indent": None, "separators": COMPACT_SEPARATORS}
DUMMY_PROGRAM_NAME = "DummyProgramName"
DUMMY_PROGRAM_ARGS = [DUMMY_VALUE_1, DUMMY_VALUE_2]
CLI_OPTIONS = {
# dest: [option_strings]
"help": ["-h", "--help"],
"output_filename": ["-o", "--output"],
"inplace": ["-I", "--inplace", "--in-place"],
"pre_commit": ["--pre-commit"],
"show_changed": ["-C", "--changed", "--show-changed"],
"show_diff": ["-D", "--diff", "--show-diff"],
"compact": ["-c", "--compact"],
"indent": ["-n", "--indent"],
"sort_keys": ["-s", "--sort-keys"],
"debug": ["--debug"],
"completion_help": ["--completion-help"],
"bash_completion": ["--bash-completion"],
"version": ["-V", "--version"],
}
CLI_MULTI_OPTIONS = {
# dest: [{option_strings}, ...]
"newlines": [
{"--newlines"},
{"-L", "--linux"},
{"-N", "--native"},
{"-M", "--microsoft"},
],
}
CLI_ARGUMENTS = {
# dest: nargs
"input_filenames": "*"
}
DUMMY_PATH_1 = "DummyPath1"
DUMMY_PATH_2 = "DummyPath2"
DUMMY_PATHS = {DUMMY_PATH_1: None, "one/two/three": None, "/a/b/c": None, "-": None}
for path in DUMMY_PATHS:
normalized_path = (
"-"
if path == "-"
else os.path.normcase(os.path.normpath(os.path.realpath(path)))
)
DUMMY_PATHS[path] = normalized_path
ARGS_DEFAULT = []
ARGS_PLAIN = ["--indent", "4"]
ARGS_INDENT = ["--indent", " " * 4]
ARGS_SORTED = ["--indent", "4", "--sort-keys"]
ARGS_COMPACT = ["--compact"]
ARGS_DEBUG = ["--debug"] if "DEBUG" in os.environ else []
NEWLINE_ARGS_LINUX = [
["-L"],
["--newlines=linux"],
["--newlines", "linux"],
["--newlines", "macos"],
["--newlines", "unix"],
]
NEWLINE_ARGS_NATIVE = [
["-N"],
["--newlines", "native"],
]
NEWLINE_ARGS_MICROSOFT = [
["-M"],
["--newlines=microsoft"],
["--newlines", "microsoft"],
["--newlines", "dos"],
["--newlines", "msft"],
["--newlines", "windows"],
]
def deep_convert_to_plain_dict(an_odict):
"""
Recursively convert `an_odict` and any of its dictionary subelements from
`collections.OrderedDict`:py:class: to plain `dict`:py:class:
.. note:: This is naive, in that it will not properly handle dictionaries
with recursive object references.
:Args:
an_odict
a (presumably) `collections.OrderedDict`:py:class: to convert
:Returns:
an "unordered" (i.e., plain) `dict`:py:class: with all ordered
dictionaries converted to `dict`:py:class:
"""
a_dict = {}
for (key, value) in an_odict.items():
if type(value) is collections.OrderedDict:
a_dict[key] = deep_convert_to_plain_dict(value)
else:
a_dict[key] = value
return a_dict
class TestJsonIndent(unittest.TestCase):
def setUp(self):
# Create temporary file for read/write testing
self.infile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
self.outfile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
def tearDown(self):
for f in (self.infile, self.outfile):
f.close()
os.remove(f.name)
def dummy_cli_args(self):
return argparse.Namespace(
input_filenames=[],
output_filename=None,
inplace=False,
newlines="native",
compact=False,
indent=2,
sort_keys=False,
debug=False,
)
def assertOrderedDictEqual(self, odict1, odict2, strict=True):
"""
Assert that `odict1` and `odict2` are "equal" from the perspective of a
dictionary, but with an additional constraint of being ordered
dictionaries as well.
:Args:
odict1
a `collections.OrderedDict`:py:class: object
odict2
another `collections.OrderedDict`:py:class: object to compare with
`odict1`
strict
(OPTIONAL) If `True`-ish, assert the order of the keys in
`odict1` is equivalent to that of `odict2`.
"""
self.assertIs(type(odict1), collections.OrderedDict)
self.assertIs(type(odict2), collections.OrderedDict)
if strict:
self.assertListEqual(list(odict1.keys()), list(odict2.keys()))
dict1 = deep_convert_to_plain_dict(odict1)
dict2 = deep_convert_to_plain_dict(odict2)
self.assertDictEqual(dict1, dict2)
def assertDictishEqual(self, dictish1, dictish2, ordered, strict=True):
"""
Assert that `dictish1` and `dictish2` are "equal" from the perspective
of a dictionary, with a possible additional constraint on being ordered
dictionaries.
:Args:
dictish1
a `dict`-ish object
dictish2
another `dict`-ish object to compare with `dictish1`
ordered
if `True`-ish, compare as ordered dictionaries
strict
(OPTIONAL) If `True`-ish, and `ordered` is also `True`-ish,
assert the order of the keys in `odict1` is equivalent to that
of `odict2`.
"""
if ordered:
self.assertOrderedDictEqual(dictish1, dictish2, strict)
else:
self.assertDictEqual(dictish1, dictish2)
def test_JSI_100_load_json(self):
with open(self.infile.name, "w") as f:
f.write(DUMMY_JSON_TEXT_UNFORMATTED)
with open(self.infile.name, "r") as f:
json_data = ji.load_json(f)
self.assertDictishEqual(
json_data, DUMMY_JSON_DATA_ORDERED_DICT, ordered=True
)
for (ordered, expected_data) in [
(True, DUMMY_JSON_DATA_ORDERED_DICT),
(False, DUMMY_JSON_DATA_DICT),
]:
with open(self.infile.name, "r") as f:
json_data = ji.load_json(f, unordered=not ordered)
self.assertDictishEqual(json_data, expected_data, ordered=ordered)
with open(self.infile.name, "r") as f:
json_data = ji.load_json(f, sort_keys=not ordered)
self.assertDictishEqual(json_data, expected_data, ordered=ordered)
def test_JSI_101_load_json_with_text(self):
with open(self.infile.name, "w") as f:
f.write(DUMMY_JSON_TEXT_UNFORMATTED)
with open(self.infile.name, "r") as f:
(json_data, json_text) = ji.load_json(f, with_text=True)
self.assertDictishEqual(
json_data, DUMMY_JSON_DATA_ORDERED_DICT, ordered=True
)
self.assertEqual(json_text, DUMMY_JSON_TEXT_UNFORMATTED)
for (ordered, expected_data) in [
(True, DUMMY_JSON_DATA_ORDERED_DICT),
(False, DUMMY_JSON_DATA_DICT),
]:
with open(self.infile.name, "r") as f:
(json_data, json_text) = ji.load_json(
f, with_text=True, unordered=not ordered
)
self.assertDictishEqual(json_data, expected_data, ordered=ordered)
self.assertEqual(json_text, DUMMY_JSON_TEXT_UNFORMATTED)
with open(self.infile.name, "r") as f:
(json_data, json_text) = ji.load_json(
f, with_text=True, sort_keys=not ordered
)
self.assertDictishEqual(json_data, expected_data, ordered=ordered)
self.assertEqual(json_text, DUMMY_JSON_TEXT_UNFORMATTED)
def test_JSI_102_load_json_file(self):
with open(self.infile.name, "w") as f:
f.write(DUMMY_JSON_TEXT_UNFORMATTED)
with open(self.infile.name, "r") as f:
(json_data, json_text) = ji.load_json_file(f)
self.assertDictishEqual(
json_data, DUMMY_JSON_DATA_ORDERED_DICT, ordered=True
)
self.assertEqual(json_text, DUMMY_JSON_TEXT_UNFORMATTED)
for (ordered, expected_data) in [
(True, DUMMY_JSON_DATA_ORDERED_DICT),
(False, DUMMY_JSON_DATA_DICT),
]:
with open(self.infile.name, "r") as f:
(json_data, json_text) = ji.load_json_file(f, unordered=not ordered)
self.assertDictishEqual(json_data, expected_data, ordered=ordered)
self.assertEqual(json_text, DUMMY_JSON_TEXT_UNFORMATTED)
with open(self.infile.name, "r") as f:
(json_data, json_text) = ji.load_json_file(f, sort_keys=not ordered)
self.assertDictishEqual(json_data, expected_data, ordered=ordered)
self.assertEqual(json_text, DUMMY_JSON_TEXT_UNFORMATTED)
def test_JSI_103_load_json_text(self):
json_text = DUMMY_JSON_TEXT_UNFORMATTED
json_data = ji.load_json_text(json_text)
self.assertDictishEqual(json_data, DUMMY_JSON_DATA_ORDERED_DICT, ordered=True)
for (ordered, expected_data) in [
(True, DUMMY_JSON_DATA_ORDERED_DICT),
(False, DUMMY_JSON_DATA_DICT),
]:
json_data = ji.load_json_text(json_text, unordered=not ordered)
self.assertDictishEqual(json_data, expected_data, ordered=ordered)
json_data = ji.load_json_text(json_text, sort_keys=not ordered)
self.assertDictishEqual(json_data, expected_data, ordered=ordered)
def test_JSI_110_dump_json(self):
with open(self.outfile.name, "w") as f:
# Ensure file exists and is empty
pass
for (kwargs, json_data, expected_json_text) in [
(PLAIN_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_FORMATTED),
(SORTED_KWARGS, DUMMY_JSON_DATA_DICT, DUMMY_JSON_TEXT_SORTED),
(COMPACT_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_COMPACT),
]:
with open(self.outfile.name, "w") as f:
text = ji.dump_json(json_data, f, **kwargs)
self.assertEqual(text, expected_json_text)
with open(self.outfile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
with open(self.outfile.name, "w") as f:
text = ji.dump_json(json_data, outfile=f, **kwargs)
self.assertEqual(text, expected_json_text)
with open(self.outfile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
def test_JSI_111_dump_json_indent_string(self):
with open(self.outfile.name, "w") as f:
# Ensure file exists and is empty
pass
for (kwargs, json_data, expected_json_text) in [
(INDENT_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_FORMATTED),
]:
if pv.python_version_is_less_than(3, 2):
with self.assertRaises(TypeError) as context: # noqa: F841
with open(self.outfile.name, "w") as f:
ji.dump_json(json_data, f, **kwargs)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "can't multiply sequence by non-int of type 'str'"
)
else:
with open(self.outfile.name, "w") as f:
text = ji.dump_json(json_data, f, **kwargs)
self.assertEqual(text, expected_json_text)
with open(self.outfile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
if pv.python_version_is_less_than(3, 2):
with self.assertRaises(TypeError) as context: # noqa: F841
with open(self.outfile.name, "w") as f:
ji.dump_json(json_data, outfile=f, **kwargs)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "can't multiply sequence by non-int of type 'str'"
)
else:
with open(self.outfile.name, "w") as f:
text = ji.dump_json(json_data, outfile=f, **kwargs)
self.assertEqual(text, expected_json_text)
with open(self.outfile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
def test_JSI_112_dump_json_to_text(self):
for (kwargs, json_data, expected_json_text) in [
(PLAIN_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_FORMATTED),
(SORTED_KWARGS, DUMMY_JSON_DATA_DICT, DUMMY_JSON_TEXT_SORTED),
(COMPACT_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_COMPACT),
]:
text = ji.dump_json(json_data, **kwargs)
self.assertEqual(text, expected_json_text)
text = ji.dump_json(json_data, outfile=None, **kwargs)
self.assertEqual(text, expected_json_text)
def test_JSI_113_dump_json_to_text_indent_string(self):
for (kwargs, json_data, expected_json_text) in [
(INDENT_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_FORMATTED),
]:
if pv.python_version_is_less_than(3, 2):
with self.assertRaises(TypeError) as context: # noqa: F841
ji.dump_json(json_data, **kwargs)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "can't multiply sequence by non-int of type 'str'"
)
else:
text = ji.dump_json(json_data, **kwargs)
self.assertEqual(text, expected_json_text)
if pv.python_version_is_less_than(3, 2):
with self.assertRaises(TypeError) as context: # noqa: F841
ji.dump_json(json_data, outfile=None, **kwargs)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "can't multiply sequence by non-int of type 'str'"
)
else:
text = ji.dump_json(json_data, outfile=None, **kwargs)
self.assertEqual(text, expected_json_text)
def test_JSI_114_dump_json_file(self):
with open(self.outfile.name, "w") as f:
# Ensure file exists and is empty
pass
for (kwargs, json_data, expected_json_text) in [
(PLAIN_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_FORMATTED),
(SORTED_KWARGS, DUMMY_JSON_DATA_DICT, DUMMY_JSON_TEXT_SORTED),
(COMPACT_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_COMPACT),
]:
with open(self.outfile.name, "w") as f:
text = ji.dump_json_file(json_data, f, **kwargs)
self.assertEqual(text, expected_json_text)
with open(self.outfile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
def test_JSI_115_dump_json_file_indent_string(self):
with open(self.outfile.name, "w") as f:
# Ensure file exists and is empty
pass
for (kwargs, json_data, expected_json_text) in [
(INDENT_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_FORMATTED),
]:
if pv.python_version_is_less_than(3, 2):
with self.assertRaises(TypeError) as context: # noqa: F841
with open(self.outfile.name, "w") as f:
ji.dump_json_file(json_data, f, **kwargs)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "can't multiply sequence by non-int of type 'str'"
)
else:
with open(self.outfile.name, "w") as f:
text = ji.dump_json_file(json_data, f, **kwargs)
self.assertEqual(text, expected_json_text)
with open(self.outfile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
def test_JSI_116_dump_json_text(self):
for (kwargs, json_data, expected_json_text) in [
(PLAIN_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_FORMATTED),
(SORTED_KWARGS, DUMMY_JSON_DATA_DICT, DUMMY_JSON_TEXT_SORTED),
(COMPACT_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_COMPACT),
]:
text = ji.dump_json_text(json_data, **kwargs)
self.assertEqual(text, expected_json_text)
def test_JSI_117_dump_json_text_indent_string(self):
for (kwargs, json_data, expected_json_text) in [
(INDENT_KWARGS, DUMMY_JSON_DATA_ORDERED_DICT, DUMMY_JSON_TEXT_FORMATTED),
]:
if pv.python_version_is_less_than(3, 2):
with self.assertRaises(TypeError) as context: # noqa: F841
ji.dump_json_text(json_data, **kwargs)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "can't multiply sequence by non-int of type 'str'"
)
else:
text = ji.dump_json_text(json_data, **kwargs)
self.assertEqual(text, expected_json_text)
def test_JSI_200_check_program_args(self):
(_prog, program_args) = ji._check_program_args(DUMMY_PROGRAM_ARGS)
self.assertListEqual(program_args, DUMMY_PROGRAM_ARGS)
sys.argv = [DUMMY_PROGRAM_NAME] + DUMMY_PROGRAM_ARGS
(_prog, program_args) = ji._check_program_args(tuple())
self.assertListEqual(program_args, DUMMY_PROGRAM_ARGS)
def test_JSI_210_setup_argparser(self):
argparser = ji._setup_argparser("prog")
self.assertIsInstance(argparser, argparse.ArgumentParser)
self.assertGreater(len(argparser.description), 0)
for action in argparser._actions:
self.assertTrue(
action.dest in CLI_OPTIONS
or action.dest in CLI_MULTI_OPTIONS
or action.dest in CLI_ARGUMENTS
)
if action.dest in CLI_OPTIONS:
self.assertSetEqual(
set(action.option_strings), set(CLI_OPTIONS[action.dest])
)
elif action.dest in CLI_MULTI_OPTIONS:
self.assertIn(
set(action.option_strings), CLI_MULTI_OPTIONS[action.dest]
)
else:
self.assertEqual(action.nargs, CLI_ARGUMENTS[action.dest])
def test_JSI_220_normalize_path(self):
for (path, expected_path) in DUMMY_PATHS.items():
test_path = ji._normalize_path(path)
self.assertEqual(test_path, expected_path)
def test_JSI_230_check_io_filenames_with_defaults(self):
cli_args = self.dummy_cli_args()
ji._check_input_and_output_filenames(cli_args)
self.assertListEqual(cli_args.input_filenames, ["-"])
self.assertEqual(cli_args.output_filename, "-")
def test_JSI_231_check_io_filenames_with_stdio(self):
cli_args = self.dummy_cli_args()
cli_args.input_filenames = ["-"]
cli_args.output_filename = "-"
ji._check_input_and_output_filenames(cli_args)
self.assertListEqual(cli_args.input_filenames, ["-"])
self.assertEqual(cli_args.output_filename, "-")
def test_JSI_232_check_io_filenames_with_input_filename(self):
cli_args = self.dummy_cli_args()
cli_args.input_filenames = [DUMMY_PATH_1]
ji._check_input_and_output_filenames(cli_args)
self.assertListEqual(cli_args.input_filenames, [DUMMY_PATH_1])
self.assertEqual(cli_args.output_filename, "-")
def test_JSI_233_check_io_filenames_with_multiple_input_filenames(self):
cli_args = self.dummy_cli_args()
cli_args.input_filenames = [DUMMY_PATH_1, DUMMY_PATH_2]
with self.assertRaises(RuntimeError) as context: # noqa: F841
ji._check_input_and_output_filenames(cli_args)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "to process more than one input file at a time, use '--inplace'"
)
def test_JSI_234_check_io_filenames_with_same_filenames(self):
cli_args = self.dummy_cli_args()
cli_args.input_filenames = [DUMMY_PATH_1]
cli_args.output_filename = DUMMY_PATH_1
with self.assertRaises(RuntimeError) as context: # noqa: F841
ji._check_input_and_output_filenames(cli_args)
errmsg = context.exception.args[0]
self.assertTrue(errmsg.startswith("input file and output file are the same"))
def test_JSI_235_check_io_filenames_with_inplace(self):
cli_args = self.dummy_cli_args()
cli_args.input_filenames = [DUMMY_PATH_1]
cli_args.inplace = True
ji._check_input_and_output_filenames(cli_args)
def test_JSI_236_check_io_filenames_with_inplace_and_multiple_filenames(self):
cli_args = self.dummy_cli_args()
cli_args.input_filenames = [DUMMY_PATH_1, DUMMY_PATH_2]
cli_args.inplace = True
ji._check_input_and_output_filenames(cli_args)
def test_JSI_237_check_io_filenames_with_inplace_and_output_filename(self):
for output_filename in [DUMMY_PATH_2, "-"]:
cli_args = self.dummy_cli_args()
cli_args.input_filenames = [DUMMY_PATH_1]
cli_args.output_filename = output_filename
cli_args.inplace = True
with self.assertRaises(RuntimeError) as context: # noqa: F841
ji._check_input_and_output_filenames(cli_args)
errmsg = context.exception.args[0]
self.assertEqual(errmsg, "output files do not make sense with '--inplace'")
def test_JSI_238_check_io_filenames_with_inplace_and_stdio(self):
cli_args = self.dummy_cli_args()
cli_args.input_filenames = ["-"]
cli_args.inplace = True
with self.assertRaises(RuntimeError) as context: # noqa: F841
ji._check_input_and_output_filenames(cli_args)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "reading from stdin does not make sense with '--inplace'"
)
def test_JSI_300_cli(self):
for (test_args, expected_json_text) in [
(ARGS_PLAIN, DUMMY_JSON_TEXT_FORMATTED),
(ARGS_SORTED, DUMMY_JSON_TEXT_SORTED),
(ARGS_COMPACT, DUMMY_JSON_TEXT_COMPACT),
]:
with open(self.infile.name, "w") as f:
f.write(DUMMY_JSON_TEXT_UNFORMATTED)
args = (
test_args
+ ARGS_DEBUG
+ ["--output", self.outfile.name, self.infile.name]
)
ji.cli(*args)
with open(self.outfile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
def test_JSI_301_cli_indent_string(self):
for (test_args, expected_json_text) in [
(ARGS_INDENT, DUMMY_JSON_TEXT_FORMATTED),
]:
with open(self.infile.name, "w") as f:
f.write(DUMMY_JSON_TEXT_UNFORMATTED)
args = (
test_args
+ ARGS_DEBUG
+ ["--output", self.outfile.name, self.infile.name]
)
if pv.python_version_is_less_than(3, 2):
with self.assertRaises(TypeError) as context: # noqa: F841
ji.cli(*args)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "can't multiply sequence by non-int of type 'str'"
)
else:
ji.cli(*args)
with open(self.outfile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
def test_JSI_302_cli_inplace(self):
for (test_args, expected_json_text) in [
(ARGS_PLAIN, DUMMY_JSON_TEXT_FORMATTED),
(ARGS_SORTED, DUMMY_JSON_TEXT_SORTED),
(ARGS_COMPACT, DUMMY_JSON_TEXT_COMPACT),
]:
with open(self.infile.name, "w") as f:
f.write(DUMMY_JSON_TEXT_UNFORMATTED)
args = test_args + ARGS_DEBUG + ["--inplace", self.infile.name]
ji.cli(*args)
with open(self.infile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
def test_JSI_303_cli_inplace_indent_string(self):
for (test_args, expected_json_text) in [
(ARGS_INDENT, DUMMY_JSON_TEXT_FORMATTED),
]:
with open(self.infile.name, "w") as f:
f.write(DUMMY_JSON_TEXT_UNFORMATTED)
args = test_args + ARGS_DEBUG + ["--inplace", self.infile.name]
if pv.python_version_is_less_than(3, 2):
with self.assertRaises(TypeError) as context: # noqa: F841
ji.cli(*args)
errmsg = context.exception.args[0]
self.assertEqual(
errmsg, "can't multiply sequence by non-int of type 'str'"
)
else:
ji.cli(*args)
with open(self.infile.name, "r") as f:
self.assertEqual(f.read(), expected_json_text)
def test_JSI_304_cli_newlines(self):
for (arg_set, expected_json_text) in [
(NEWLINE_ARGS_LINUX, DUMMY_JSON_TEXT_FORMATTED),
(NEWLINE_ARGS_NATIVE, DUMMY_JSON_TEXT_FORMATTED.replace("\n", os.linesep)),
(NEWLINE_ARGS_MICROSOFT, DUMMY_JSON_TEXT_FORMATTED.replace("\n", "\r\n")),
]:
for test_args in arg_set:
with open(self.infile.name, "w") as f:
f.write(DUMMY_JSON_TEXT_UNFORMATTED)
args = (
test_args
+ ARGS_PLAIN
+ ARGS_DEBUG
+ ["--output", self.outfile.name, self.infile.name]
)
ji.cli(*args)
with io.open(self.outfile.name, "rt", newline="") as f:
self.assertEqual(f.read(), expected_json_text)
def test_JSI_310_cli_version(self):
with self.assertRaises(SystemExit) as context: # noqa: F841
ji.cli(*["--version"])
def test_JSI_320_cli_version_via_main(self):
with self.assertRaises(SystemExit) as context: # noqa: F841
ji.main(*["--version"])
```
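For orientation, the load/dump helpers exercised above round-trip like this (a sketch; separators are passed explicitly, exactly as the test kwargs do):
```python
import json_indent.json_indent as ji

data = ji.load_json_text('{"b": 2, "a": 1}')  # an OrderedDict by default
text = ji.dump_json_text(data, indent=4, separators=(",", ": "), sort_keys=True)
print(text)
```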
#### File: tests/json_indent/test_textiofile.py
```python
from __future__ import absolute_import
import os
import sys
import tempfile
import unittest
import json_indent.iofile as iof
DUMMY_PATH = "DummyPath"
DUMMY_INPUT_NEWLINE = "DummyInputNewline"
DUMMY_OUTPUT_NEWLINE = "DummyOutputNewline"
DUMMY_MODE = "DummyMode"
DUMMY_PURPOSE = "DummyPurpose"
DUMMY_IO_PROPERTY = "DummyIOProperty"
class TestTextIOFile(unittest.TestCase):
def setUp(self):
# Create temporary file for read/write testing
self.testfile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
def tearDown(self):
self.testfile.close()
os.remove(self.testfile.name)
def test_TIOF_000_instantiate_empty(self):
with self.assertRaises(TypeError) as context: # noqa: F841
iof.TextIOFile()
e = context.exception
message = e.args[0]
expected_messages = {
"python-2.7-exactly": "__init__() takes exactly 2 arguments (1 given)",
"python-2.7-at-least": "__init__() takes at least 2 arguments (1 given)",
"python-3.x": "__init__() missing 1 required positional argument: 'path'",
}
self.assertIn(message, expected_messages.values())
def test_TIOF_010_instantiate(self):
x = iof.TextIOFile(DUMMY_PATH)
self.assertEqual(x.path, DUMMY_PATH)
x = iof.TextIOFile(path=DUMMY_PATH)
self.assertEqual(x.path, DUMMY_PATH)
x = iof.TextIOFile(DUMMY_PATH, DUMMY_INPUT_NEWLINE)
x = iof.TextIOFile(DUMMY_PATH, input_newline=DUMMY_INPUT_NEWLINE)
x = iof.TextIOFile(DUMMY_PATH, DUMMY_INPUT_NEWLINE, DUMMY_OUTPUT_NEWLINE)
x = iof.TextIOFile(
DUMMY_PATH,
input_newline=DUMMY_INPUT_NEWLINE,
output_newline=DUMMY_OUTPUT_NEWLINE,
)
def test_TIOF_100_raise_open_error(self):
x = iof.TextIOFile(DUMMY_PATH)
x.mode = DUMMY_MODE
with self.assertRaises(iof.IOFileOpenError) as context: # noqa: F841
x._raise_open_error(DUMMY_PURPOSE)
e = context.exception
self.assertIsInstance(e, iof.IOFileOpenError)
self.assertEqual(e.path, DUMMY_PATH)
self.assertEqual(e.mode, DUMMY_MODE)
self.assertEqual(e.purpose, DUMMY_PURPOSE)
message = e.args[0]
expected_message = "{path}: error opening for {purpose} (mode: {mode})".format(
path=DUMMY_PATH, mode=DUMMY_MODE, purpose=DUMMY_PURPOSE
)
self.assertEqual(message, expected_message)
def test_TIOF_110_get_io_property(self):
x = iof.TextIOFile(DUMMY_PATH)
self.assertIsNone(x._get_io_property("input", "newline"))
self.assertIsNone(x._get_io_property("output", "newline"))
x = iof.TextIOFile(
DUMMY_PATH,
input_newline=DUMMY_INPUT_NEWLINE,
output_newline=DUMMY_OUTPUT_NEWLINE,
)
for (purpose, mode, newline, stream) in [
("input", "rt", DUMMY_INPUT_NEWLINE, sys.stdin),
("output", "wt", DUMMY_OUTPUT_NEWLINE, sys.stdout),
]:
self.assertEqual(x._get_io_property(purpose, "target_mode"), mode)
self.assertEqual(x._get_io_property(purpose, "newline"), newline)
self.assertIs(x._get_io_property(purpose, "stdio_stream"), stream)
with self.assertRaises(ValueError) as context: # noqa: F841
x._get_io_property(DUMMY_PURPOSE, "target_mode")
with self.assertRaises(ValueError) as context: # noqa: F841
x._get_io_property(DUMMY_PURPOSE, "newline")
with self.assertRaises(ValueError) as context: # noqa: F841
x._get_io_property(DUMMY_PURPOSE, "stdio_stream")
with self.assertRaises(ValueError) as context: # noqa: F841
x._get_io_property(DUMMY_PURPOSE, DUMMY_IO_PROPERTY)
with self.assertRaises(KeyError) as context: # noqa: F841
x._get_io_property("input", DUMMY_IO_PROPERTY)
with self.assertRaises(KeyError) as context: # noqa: F841
x._get_io_property("output", DUMMY_IO_PROPERTY)
def test_TIOF_120_close(self):
for (path, openfile, should_be_closed) in [
(self.testfile.name, open(self.testfile.name), True),
("-", sys.stdin, False),
("-", sys.stdout, False),
]:
x = iof.TextIOFile(path)
x.mode = DUMMY_MODE
x.file = openfile
x.close()
self.assertIs(x.mode, None)
self.assertIs(x.file, None)
self.assertTrue(
openfile.closed if should_be_closed else not openfile.closed
)
def test_TIOF_130_open_for_purpose(self):
x = iof.TextIOFile(self.testfile.name)
for (purpose, expected_mode) in [("input", "rt"), ("output", "wt")]:
f = x._open_for_purpose(purpose)
self.assertEqual(x.mode, expected_mode)
self.assertIs(f, x.file)
# Repeating should give same results even if open
f = x._open_for_purpose(purpose)
self.assertEqual(x.mode, expected_mode)
self.assertIs(f, x.file)
x.close()
def test_TIOF_140_open_stdio_streams(self):
x = iof.TextIOFile("-")
for (purpose, expected_mode, expected_stream) in [
("input", "rt", sys.stdin),
("output", "wt", sys.stdout),
]:
f = x._open_for_purpose(purpose)
self.assertEqual(x.mode, expected_mode)
self.assertEqual(f.fileno(), expected_stream.fileno())
x.close()
def test_TIOF_150_open_for_input(self):
x = iof.TextIOFile(self.testfile.name)
f = x.open_for_input()
self.assertEqual(x.mode, "rt")
self.assertIs(f, x.file)
# Repeating should give same results even if open
f = x.open_for_input()
self.assertEqual(x.mode, "rt")
self.assertIs(f, x.file)
x.close()
def test_TIOF_160_open_for_output(self):
x = iof.TextIOFile(self.testfile.name)
f = x.open_for_output()
self.assertEqual(x.mode, "wt")
self.assertIs(f, x.file)
# Repeating should give same results even if open
f = x.open_for_output()
self.assertEqual(x.mode, "wt")
self.assertIs(f, x.file)
x.close()
def test_TIOF_170_reopen(self):
x = iof.TextIOFile(self.testfile.name)
x.open_for_input()
with self.assertRaises(iof.IOFileOpenError) as context: # noqa: F841
x.open_for_output()
x.close()
x.open_for_output()
with self.assertRaises(iof.IOFileOpenError) as context: # noqa: F841
x.open_for_input()
x.close()
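# Typical round trip with the class under test, per the API exercised above
# (sketch): open for input in mode "rt", read, then close; close() resets
# .file and .mode and never closes the stdio streams.
#   f = iof.TextIOFile("notes.json", input_newline="", output_newline="\n")
#   stream = f.open_for_input()
#   text = stream.read()
#   f.close()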
``` |
{
"source": "jmknoble/markdown-toc",
"score": 3
} |
#### File: markdown-toc/markdown_toc/cli.py
```python
from __future__ import print_function
import datetime
import difflib
import os.path
import sys
import argcomplete
from . import argparsing, completion, get_version, iofile, mdfile
####################
STATUS_SUCCESS = 0
STATUS_FAILURE = 1
STATUS_CHANGED = 99
DIFF_CONTEXT_LINES = 3
NEWLINE_FORMAT_LINUX = "linux"
NEWLINE_FORMAT_MICROSOFT = "microsoft"
NEWLINE_FORMAT_NATIVE = "native"
NEWLINE_FORMATS = [
NEWLINE_FORMAT_LINUX,
NEWLINE_FORMAT_MICROSOFT,
NEWLINE_FORMAT_NATIVE,
]
NEWLINE_FORMAT_ALIAS_DOS = "dos"
NEWLINE_FORMAT_ALIAS_MACOS = "macos"
NEWLINE_FORMAT_ALIAS_MSFT = "msft"
NEWLINE_FORMAT_ALIAS_UNIX = "unix"
NEWLINE_FORMAT_ALIAS_WINDOWS = "windows"
NEWLINE_FORMAT_ALIASES = {
NEWLINE_FORMAT_ALIAS_DOS: NEWLINE_FORMAT_MICROSOFT,
NEWLINE_FORMAT_ALIAS_MACOS: NEWLINE_FORMAT_LINUX,
NEWLINE_FORMAT_ALIAS_MSFT: NEWLINE_FORMAT_MICROSOFT,
NEWLINE_FORMAT_ALIAS_UNIX: NEWLINE_FORMAT_LINUX,
NEWLINE_FORMAT_ALIAS_WINDOWS: NEWLINE_FORMAT_MICROSOFT,
}
ALL_NEWLINE_FORMATS = sorted(NEWLINE_FORMATS + list(NEWLINE_FORMAT_ALIASES.keys()))
NEWLINE_VALUES = {
NEWLINE_FORMAT_LINUX: "\n",
NEWLINE_FORMAT_MICROSOFT: "\r\n",
NEWLINE_FORMAT_NATIVE: None,
}
DEFAULT_NEWLINES = NEWLINE_FORMAT_NATIVE
DEFAULT_HEADING_TEXT = "Contents"
DEFAULT_HEADING_LEVEL = 1
DEFAULT_ADD_TRAILING_HEADING_CHARS = False
DEFAULT_ALT_LIST_CHAR = False
DEFAULT_NUMBERED = False
DEFAULT_SKIP_LEVEL = 0
####################
def _generate_comment(
prog, argv, suffix="", with_full_command=False, with_datestamp=False
):
if with_full_command:
command = "'" + " ".join(argv) + "'"
else:
command = os.path.basename(prog)
datestamp = "".join([datetime.datetime.utcnow().isoformat(), "Z"])
if with_datestamp:
template = "Generated by {command} on {datestamp}{suffix}"
else:
template = "Generated by {command}{suffix}"
comment_text = template.format(command=command, datestamp=datestamp, suffix=suffix)
return comment_text
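# For example (sketch): _generate_comment("/usr/bin/markdown-toc", argv)
# returns "Generated by markdown-toc"; with with_full_command=True and
# with_datestamp=True it yields something like
# "Generated by 'markdown-toc README.md' on 2024-01-01T00:00:00.000000Z".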
def _compute_diff(filename, input_text, output_text, context_lines=DIFF_CONTEXT_LINES):
input_filename = os.path.join("a", filename)
output_filename = os.path.join("b", filename)
input_lines = input_text.split("\n")
output_lines = output_text.split("\n")
return difflib.unified_diff(
input_lines,
output_lines,
fromfile=input_filename,
tofile=output_filename,
n=context_lines,
# TODO: Remove this if we start using readlines() to get input/output text.
lineterm="",
)
def _add_file_arguments(parser):
parser.add_argument(
"input_filenames",
nargs="*",
action="store",
default=[],
metavar="INPUTFILE",
help="input file[s], or '-' for stdin (default: stdin)",
)
parser.add_argument(
"-o",
"--output",
action="store",
dest="output_filename",
default=None,
metavar="OUTPUTFILE",
help=(
"output file, or '-' for stdout (default: stdout); "
"conflicts with '--inplace'"
),
)
parser.add_argument(
"-I",
"--inplace",
"--in-place",
action="store_true",
help="write changes to input file in place",
)
def _add_diff_arguments(parser):
diff_mutex_group = parser.add_mutually_exclusive_group()
diff_mutex_group.add_argument(
"-C",
"--changed",
"--show-changed",
dest="show_changed",
action="store_true",
default=False,
help="when used with '--inplace', note when a file has changed",
)
diff_mutex_group.add_argument(
"-D",
"--diff",
"--show-diff",
dest="show_diff",
action="store_true",
default=False,
help="when used with '--inplace', show differences when a file has changed",
)
def _add_newline_arguments(parser):
newlines_mutex_group = parser.add_mutually_exclusive_group()
newlines_mutex_group.add_argument(
"--newlines",
dest="newlines",
action="store",
choices=ALL_NEWLINE_FORMATS,
default=DEFAULT_NEWLINES,
help="newline format (default: {})".format(DEFAULT_NEWLINES),
)
newlines_mutex_group.add_argument(
"-L",
"--linux",
dest="newlines",
action="store_const",
const=NEWLINE_FORMAT_LINUX,
help="same as '--newlines {}'".format(NEWLINE_FORMAT_LINUX),
)
newlines_mutex_group.add_argument(
"-M",
"--microsoft",
dest="newlines",
action="store_const",
const=NEWLINE_FORMAT_MICROSOFT,
help="same as '--newlines {}'".format(NEWLINE_FORMAT_MICROSOFT),
)
newlines_mutex_group.add_argument(
"-N",
"--native",
dest="newlines",
action="store_const",
const=NEWLINE_FORMAT_NATIVE,
help="same as '--newlines {}'".format(NEWLINE_FORMAT_NATIVE),
)
def _add_heading_arguments(parser):
parser.add_argument(
"-T",
"--heading-text",
action="store",
default=DEFAULT_HEADING_TEXT,
help="Text of heading above table of contents (default: '{default}')".format(
default=DEFAULT_HEADING_TEXT
),
)
parser.add_argument(
"-H",
"--heading-level",
action="store",
type=int,
default=DEFAULT_HEADING_LEVEL,
help="Level of heading above table of contents (default: {default})".format(
default=DEFAULT_HEADING_LEVEL
),
)
parser.add_argument(
"-S",
"--skip-level",
action="store",
type=int,
default=DEFAULT_SKIP_LEVEL,
help=(
"Number of heading levels to leave out of table contents "
"(default: {default})"
).format(default=DEFAULT_SKIP_LEVEL),
)
def _add_option_arguments(parser):
parser.add_argument(
"-#",
"--add-trailing-heading-chars",
dest="add_trailing_heading_chars",
action="store_true",
default=DEFAULT_ADD_TRAILING_HEADING_CHARS,
help=(
"Add trailing '#' characters to the table of contents heading "
"(default: {default})"
).format(default=DEFAULT_ADD_TRAILING_HEADING_CHARS),
)
parser.add_argument(
"-l",
"--alt-list-char",
"--alternate-list-character",
action="store_true",
default=DEFAULT_ALT_LIST_CHAR,
help=(
"Use alternate list character ('*') for table of contents entries "
"(default: use '-')"
),
)
parser.add_argument(
"-n",
"--numbered",
action="store_true",
default=DEFAULT_NUMBERED,
help=("Add numbering to table of contents entries (default: {default})").format(
default=DEFAULT_NUMBERED
),
)
def _add_comment_arguments(parser):
comment_arg_group = parser.add_mutually_exclusive_group()
comment_arg_group.add_argument(
"-c",
"--comment",
action="store",
default=None,
help=(
"Comment to add to Markdown source near table of contents "
"(default: auto-generated)"
),
)
comment_arg_group.add_argument(
"--no-comment",
"--nocomment",
dest="comment",
action="store_const",
const="",
help="Do not add any comment to Markdown source",
)
return comment_arg_group
def _add_pre_commit_arguments(parser):
parser.add_argument(
"--pre-commit",
action="store_true",
help="Shortcut for '--inplace --changed' with static default comment",
)
def _add_completion_arguments(parser):
parser.add_argument(
"--completion-help",
action="store_true",
help="Print instructions for enabling shell command-line autocompletion",
)
parser.add_argument(
"--bash-completion",
action="store_true",
help="Print autocompletion code for Bash-compatible shells to evaluate",
)
def _setup_args(argv):
(prog, argv) = argparsing.grok_argv(argv)
parser = argparsing.setup_argparse(
prog=prog,
description=(
"Add or update a table of contents in one or more "
"GitHub-flavored Markdown documents."
),
)
_add_file_arguments(parser)
_add_diff_arguments(parser)
_add_newline_arguments(parser)
_add_heading_arguments(parser)
_add_option_arguments(parser)
_add_comment_arguments(parser)
_add_pre_commit_arguments(parser)
_add_completion_arguments(parser)
parser.add_argument("-V", "--version", action="version", version=get_version(prog))
argcomplete.autocomplete(parser)
args = parser.parse_args()
return (prog, args)
def _check_newlines(cli_args):
if cli_args.newlines not in NEWLINE_FORMATS:
cli_args.newlines = NEWLINE_FORMAT_ALIASES[cli_args.newlines]
def _normalize_path(path):
"""Fully regularize a given filesystem path."""
if path != "-":
path = os.path.normcase(os.path.normpath(os.path.realpath(path)))
return path
def _check_input_and_output_filenames(cli_args):
"""Check args found by `argparse.ArgumentParser`:py:class: and regularize."""
if len(cli_args.input_filenames) == 0:
cli_args.input_filenames.append("-") # default to stdin
if not cli_args.inplace:
if cli_args.output_filename is None:
cli_args.output_filename = "-" # default to stdout
if len(cli_args.input_filenames) > 1:
raise RuntimeError(
"to process more than one input file at a time, use '--inplace'"
)
output_filename = _normalize_path(cli_args.output_filename)
input_filename = _normalize_path(cli_args.input_filenames[0])
if input_filename != "-" and input_filename == output_filename:
raise RuntimeError(
"input file and output file are the same; "
"use '--inplace' to modify files in place"
)
else:
if cli_args.output_filename is not None:
raise RuntimeError("output files do not make sense with '--inplace'")
for i in range(len(cli_args.input_filenames)):
if cli_args.input_filenames[i] == "-":
raise RuntimeError(
"reading from stdin does not make sense with '--inplace'"
)
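# Illustrative invocations of the rules above (editorial sketch; 'mdtoc' is a
# hypothetical command name, not taken from this source):
#
#     mdtoc README.md                # read README.md, write result to stdout
#     mdtoc -o OUT.md README.md      # ok: distinct input and output files
#     mdtoc a.md b.md                # error: multiple inputs need '--inplace'
#     mdtoc --inplace a.md b.md      # ok: each file is rewritten in place
#     mdtoc --inplace -              # error: stdin cannot be edited in place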
def _check_completion_args(cli_args):
return any([cli_args.completion_help, cli_args.bash_completion])
def _do_completion(cli_args, prog):
if cli_args.completion_help:
print(completion.get_instructions(prog, ["--bash-completion"]))
elif cli_args.bash_completion:
print(completion.get_commands(prog))
def _check_pre_commit_args(cli_args):
if not cli_args.pre_commit:
return
cli_args.inplace = True
if not cli_args.show_diff:
cli_args.show_changed = True
def _check_diff_args(cli_args):
if cli_args.show_changed and not cli_args.inplace:
raise RuntimeError("'-C/--show-changed' only makes sense with '--inplace'")
if cli_args.show_diff and not cli_args.inplace:
raise RuntimeError("'-D/--show-diff' only makes sense with '--inplace'")
def _set_default_comment(cli_args, prog, argv):
if cli_args.comment is not None:
return
cli_args.comment = (
_generate_comment(prog, argv, suffix=" pre-commit hook")
if cli_args.pre_commit
else _generate_comment(prog, argv, with_full_command=True, with_datestamp=True)
)
def main(*argv):
"""Do the thing."""
(prog, args) = _setup_args(argv)
if _check_completion_args(args):
_do_completion(args, prog)
return STATUS_SUCCESS
_check_pre_commit_args(args)
_check_diff_args(args)
_check_newlines(args)
_check_input_and_output_filenames(args)
_set_default_comment(args, prog, argv)
overall_status = STATUS_SUCCESS
for input_filename in args.input_filenames:
file_status = STATUS_SUCCESS
input_iofile = iofile.TextIOFile(
input_filename,
input_newline="",
output_newline=NEWLINE_VALUES[args.newlines],
)
output_iofile = (
input_iofile
if args.inplace
else iofile.TextIOFile(
args.output_filename,
input_newline="",
output_newline=NEWLINE_VALUES[args.newlines],
)
)
input_iofile.open_for_input()
md = mdfile.MarkdownFile(
infile=input_iofile.file, infilename=input_iofile.printable_name
)
try:
input_text = md.read()
md.parse(
heading_text=args.heading_text,
heading_level=args.heading_level,
skip_level=args.skip_level,
)
except (TypeError, ValueError) as e:
if not args.inplace:
raise SystemExit(e)
overall_status = STATUS_FAILURE
file_status = STATUS_FAILURE
print(e, file=sys.stderr)
input_iofile.close()
if file_status != STATUS_FAILURE:
output_iofile.open_for_output()
md.write(
numbered=args.numbered,
toc_comment=args.comment,
alt_list_char=args.alt_list_char,
add_trailing_heading_chars=args.add_trailing_heading_chars,
outfile=output_iofile.file,
)
output_iofile.close()
if args.inplace and (args.show_changed or args.show_diff):
output_iofile.open_for_input()
output_text = output_iofile.file.read()
if input_text != output_text:
file_status = STATUS_CHANGED
if overall_status != STATUS_FAILURE:
overall_status = file_status
print(
"Updated {}".format(output_iofile.file.name),
file=sys.stderr,
)
if args.show_diff:
for line in _compute_diff(
output_iofile.file.name, input_text, output_text
):
print(line)
output_iofile.close()
return overall_status
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jmknoble/python-venv",
"score": 2
} |
#### File: python-venv/python_venv/__main__.py
```python
from __future__ import absolute_import
import sys
from . import cli
def main():
"""Provide a generic main entry point."""
sys.exit(cli.main(*sys.argv))
if __name__ == "__main__":
main()
```
#### File: tests/python_venv/contextmgr.py
```python
import contextlib
import os
import os.path
import sys
import tempfile
from io import StringIO
@contextlib.contextmanager
def capture(a_callable, *args, **kwargs):
"""Capture status, stdout, and stderr from a function or method call"""
(orig_stdout, sys.stdout) = (sys.stdout, StringIO())
(orig_stderr, sys.stderr) = (sys.stderr, StringIO())
try:
status = a_callable(*args, **kwargs)
sys.stdout.seek(0)
sys.stderr.seek(0)
yield (status, sys.stdout.read(), sys.stderr.read())
finally:
sys.stdout.close()
sys.stderr.close()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
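# Illustrative usage of capture() (editorial sketch, not part of the original
# module; greet() is a hypothetical callable):
#
#     def greet(name):
#         print("hello,", name)
#         return 0
#
#     with capture(greet, "world") as (status, out, err):
#         assert status == 0
#         assert "hello, world" in out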
@contextlib.contextmanager
def capture_to_file(a_callable, *args, **kwargs):
"""Capture status, stdout, and stderr from a function or method call"""
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = tempfile.SpooledTemporaryFile(
mode="w+", prefix=a_callable.__name__, suffix=".out"
)
sys.stderr = tempfile.SpooledTemporaryFile(
mode="w+", prefix=a_callable.__name__, suffix=".err"
)
status = a_callable(*args, **kwargs)
sys.stdout.seek(0)
sys.stderr.seek(0)
yield (status, sys.stdout.read(), sys.stderr.read())
finally:
sys.stdout.close()
sys.stderr.close()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
@contextlib.contextmanager
def capture_output():
"""Capture stdout, and stderr"""
(orig_stdout, sys.stdout) = (sys.stdout, StringIO())
(orig_stderr, sys.stderr) = (sys.stderr, StringIO())
try:
yield (sys.stdout, sys.stderr)
finally:
sys.stdout.close()
sys.stderr.close()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
@contextlib.contextmanager
def capture_output_to_file():
"""Capture stdout, and stderr"""
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = tempfile.SpooledTemporaryFile(mode="w+", suffix=".out")
sys.stderr = tempfile.SpooledTemporaryFile(mode="w+", suffix=".err")
yield (sys.stdout, sys.stderr)
finally:
sys.stdout.close()
sys.stderr.close()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
SETUP_TEMPLATE = """
import os.path
from setuptools import find_packages, setup
SETUP_DIR = os.path.dirname(os.path.realpath(__file__))
def get_requirements_from_file(dirname, basename, default=None):
reqs_path = os.path.join(dirname, basename)
if os.path.isfile(reqs_path):
with open(reqs_path) as f:
return [x for x in (y.strip() for y in f) if not x.startswith("#")]
else:
return [] if default is None else default
setup(
name="{package_name}",
packages=find_packages(
include=["*"],
exclude=[
"build",
"dist",
"docs",
"examples",
"tests",
"tests.*",
"*.egg-info",
]
),
install_requires=get_requirements_from_file(SETUP_DIR, "requirements.txt"),
)
"""
def _ensure_relative_path(path):
if ".." in path:
raise ValueError(
f"path must be relative and may not refer to parent dir: '{path}'"
)
if not path.startswith(os.path.join(os.curdir, "")):
path = os.path.join(os.curdir, path)
return path
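# Behavior sketch for _ensure_relative_path() (illustrative, POSIX paths):
#
#     _ensure_relative_path("pkg")      # -> "./pkg"
#     _ensure_relative_path("./pkg")    # -> "./pkg" (already prefixed)
#     _ensure_relative_path("../pkg")   # raises ValueError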
def _python_lib_version():
major = sys.version_info[0]
minor = sys.version_info[1]
return f"python{major}.{minor}"
def _clean_syspath(syspath, suffix):
syspath_parts = syspath.split(os.pathsep)
for env_var in [
"VIRTUAL_ENV",
"CONDA_PREFIX",
]:
env_dir = os.environ.get(env_var)
if not env_dir:
continue
env_dir = os.path.join(env_dir, suffix)
indexes = []
for (i, path) in enumerate(syspath_parts):
if path == env_dir:
indexes.append(i)
for i in reversed(indexes):
del syspath_parts[i]
return os.pathsep.join(syspath_parts)
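# Example of what _clean_syspath() removes (illustrative values, POSIX ':'
# path separator): with VIRTUAL_ENV=/venv,
#
#     _clean_syspath("/venv/bin:/usr/bin:/bin", "bin")  # -> "/usr/bin:/bin"
#
# Only entries exactly matching <env dir>/<suffix> are dropped.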
def _clean_environ_copy(environ):
environ = environ.copy()
try:
environ["PATH"] = _clean_syspath(environ["PATH"], "bin")
environ["PYTHONPATH"] = _clean_syspath(
environ["PYTHONPATH"],
os.path.join("lib", _python_lib_version(), "site-packages"),
)
except KeyError:
pass
for env_var in [
"PYTHONHOME",
"VIRTUAL_ENV",
"CONDA_DEFAULT_ENV",
"CONDA_PREFIX",
]:
try:
del environ[env_var]
except KeyError:
pass
return environ
@contextlib.contextmanager
def project(
package_name, dirs=None, filespecs=None, cleanup=None, omit_setup=False, **kwargs
):
"""
Set up a mock Python project to create an environment from and change
directory to it.
:Args:
package_name
The name of the Python package the project contains.
dirs
(optional) a list of directories to create recursively.
filespecs
(optional) a dictionary in the form::
{
relative_path: text,
...
}
where `relative_path` is a relative path to a file, and `text` is a
string or string template to be written as the contents. If
`kwargs` is supplied, `text` will be written as
``text.format(**kwargs)``.
cleanup
(optional) a callable; if supplied, will be called on exiting the
context manager before any other cleanup.
omit_setup
(optional) if `True`-ish, omit ``setup.py`` from the project.
kwargs
(optional) additional keyword arguments used for formatting
contents of files
"""
try:
if dirs is None:
dirs = []
if filespecs is None:
filespecs = {}
original_environ = os.environ
os.environ = _clean_environ_copy(os.environ)
original_cwd = os.getcwd()
temp_dir = tempfile.TemporaryDirectory()
os.chdir(temp_dir.name)
if not omit_setup:
with open("setup.py", "w") as f:
f.write(SETUP_TEMPLATE.format(package_name=package_name))
package_dir = _ensure_relative_path(package_name)
os.mkdir(package_dir)
with open(os.path.join(package_dir, "__init__.py"), "w"):
pass # empty file is ok
for path in dirs:
path = _ensure_relative_path(path)
os.makedirs(path, exist_ok=True)  # create the directory itself, not just its parent
for (path, contents) in filespecs.items():
path = _ensure_relative_path(path)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w") as f:
if kwargs:
contents = contents.format(**kwargs)
f.write(contents)
yield
finally:
if cleanup:
cleanup()
os.environ = original_environ
os.chdir(original_cwd)
temp_dir.cleanup()
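# Illustrative usage of project() (editorial sketch; names are hypothetical):
#
#     with project(
#         "dummy_package",
#         filespecs={"requirements.txt": "argcomplete"},
#     ):
#         # cwd is now a temp dir containing setup.py, dummy_package/ with an
#         # empty __init__.py, and requirements.txt; env vars are scrubbed
#         ...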
```
#### File: tests/python_venv/test_env_pyenv.py
```python
import os
import os.path
import random
import subprocess
import sys
import unittest
import parameterized # https://pypi.org/project/parameterized/
from python_venv import const, env
from python_venv import exceptions as exc
from python_venv import reqs
from tests.python_venv import contextmgr as ctx
from tests.python_venv import flags
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_200_PyenvEnvironment(unittest.TestCase):
def setUp(self):
self.saved_requirements = reqs.REQUIREMENTS
def tearDown(self):
reqs.REQUIREMENTS = self.saved_requirements
def test_PV_ENV_PYNV_000_instantiate_empty(self):
with self.assertRaises(TypeError) as raised:
env.PyenvEnvironment()
msg = raised.exception.args[0]
self.assertTrue(
msg.startswith("__init__() missing 1 required positional argument")
)
@parameterized.parameterized.expand(
[
("dry_run", {"dry_run": True}, "dry_run", True),
("force", {"force": True}, "force", True),
(
"message_prefix",
{"message_prefix": "dummy_message_prefix"},
"message_prefix",
"dummy_message_prefix",
),
("python", {"python": "dummy_python"}, "python", "dummy_python"),
("basename", {"basename": "dummy_basename"}, "_basename", "dummy_basename"),
("env_name", {"env_name": "dummy_env_name"}, "_env_name", "dummy_env_name"),
(
"env_prefix",
{"env_prefix": "dummy_env_prefix"},
"_env_prefix",
"dummy_env_prefix",
),
]
)
def test_PV_ENV_PYNV_002_instantiate_kwargs(self, name, kwargs, attr, value):
x = env.PyenvEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(getattr(x, attr), value)
def test_PV_ENV_PYNV_010_requirements(self):
dummy_requirements = {"dummy_req_source": ["dummy_requirement"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.PyenvEnvironment("dummy_req_scheme")
self.assertListEqual(x.requirements.requirements, [dummy_requirements])
def test_PV_ENV_PYNV_020_package_name(self):
x = env.PyenvEnvironment("dummy_req_scheme")
self.assertEqual(x.package_name, "python_venv")
@parameterized.parameterized.expand(
[
("default", None, "python-venv"),
("specified", "dummy-package", "dummy-package"),
]
)
def test_PV_ENV_PYNV_030_basename(self, name, basename, expected):
kwargs = {} if basename is None else {"basename": basename}
x = env.PyenvEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(x.basename, expected)
@parameterized.parameterized.expand(
[
("default", reqs.REQ_SCHEME_PLAIN, {}, "python-venv"),
("default_dev", reqs.REQ_SCHEME_DEV, {}, "python-venv-dev"),
("default_devplus", reqs.REQ_SCHEME_DEVPLUS, {}, "python-venv-dev"),
(
"default_prefix",
reqs.REQ_SCHEME_PLAIN,
{"env_prefix": "dummy-prefix-"},
"dummy-prefix-python-venv",
),
(
"basename",
reqs.REQ_SCHEME_PLAIN,
{"basename": "dummy-package"},
"dummy-package",
),
(
"basename_dev",
reqs.REQ_SCHEME_DEV,
{"basename": "dummy-package"},
"dummy-package-dev",
),
(
"basename_devplus",
reqs.REQ_SCHEME_DEVPLUS,
{"basename": "dummy-package"},
"dummy-package-dev",
),
(
"basename_prefix",
reqs.REQ_SCHEME_PLAIN,
{"basename": "dummy-package", "env_prefix": "dummy-prefix-"},
"dummy-prefix-dummy-package",
),
("specified", "dummy_req_scheme", {"env_name": "dummy-env"}, "dummy-env"),
(
"specified_prefix",
"dummy_req_scheme",
{"env_name": "dummy-env", "env_prefix": "dummy-prefix-"},
"dummy-env",
),
]
)
def test_PV_ENV_PYNV_040_env_name(self, name, req_scheme, kwargs, expected):
x = env.PyenvEnvironment(req_scheme, **kwargs)
self.assertEqual(x.env_name, expected)
@parameterized.parameterized.expand(
[
("default", "dummy-basename", None, None, "<ENV_DIR>"),
("specified", None, "dummy-env", None, "<ENV_DIR>"),
("with_prefix", "dummy-basename", None, "dummy-prefix", "<ENV_DIR>"),
(
"specified_with_prefix",
"dummy-basename",
"dummy-env",
"dummy-prefix",
"<ENV_DIR>",
),
]
)
def test_PV_ENV_PYNV_050_env_dir_dry_run(
self, name, basename, env_name, env_prefix, expected
):
kwargs = {}
if basename is not None:
kwargs["basename"] = basename
if env_name is not None:
kwargs["env_name"] = env_name
if env_prefix is not None:
kwargs["env_prefix"] = env_prefix
x = env.PyenvEnvironment(reqs.REQ_SCHEME_PLAIN, dry_run=True, **kwargs)
self.assertEqual(x.env_dir, expected)
@parameterized.parameterized.expand(
[
(
"default",
"dummy-basename",
None,
None,
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
(
"specified",
None,
"dummy-env",
None,
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
(
"with_prefix",
"dummy-basename",
None,
"dummy-prefix",
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
(
"specified_with_prefix",
"dummy-basename",
"dummy-env",
"dummy-prefix",
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
]
)
def test_PV_ENV_PYNV_051_abs_env_dir_dry_run(
self, name, basename, env_name, env_prefix, expected
):
kwargs = {}
if basename is not None:
kwargs["basename"] = basename
if env_name is not None:
kwargs["env_name"] = env_name
if env_prefix is not None:
kwargs["env_prefix"] = env_prefix
x = env.PyenvEnvironment(reqs.REQ_SCHEME_PLAIN, dry_run=True, **kwargs)
self.assertEqual(x.abs_env_dir, expected)
@parameterized.parameterized.expand(
[
("specified", "dummy-env", "dummy-env"),
]
)
def test_PV_ENV_PYNV_060_env_description(self, name, env_name, expected):
kwargs = {} if env_name is None else {"env_name": env_name}
x = env.PyenvEnvironment("dummy_req_scheme", **kwargs)
x.env_description  # evaluate once to ensure the property does not raise
self.assertTrue(x.env_description.endswith(expected))
@parameterized.parameterized.expand(
[
("dry_run_text", {}, "[DRY-RUN]"),
("create_msg", {}, "Creating pyenv environment dummy-package"),
("create_venv", {}, "+ pyenv virtualenv"),
("install_msg", {}, "Installing dummy_req_scheme requirements"),
(
"pip_install",
{},
"+ <ENV_DIR>/bin/python3 -m pip install -r dummy_requirements.txt",
),
("success", {}, "==> Done."),
]
)
def test_PV_ENV_PYNV_100_create_dry_run(self, name, kwargs, expected_text):
dummy_requirements = {const.FROM_FILES: ["dummy_requirements.txt"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.PyenvEnvironment(
"dummy_req_scheme",
dry_run=True,
basename="dummy-package",
ignore_preflight_checks=True,
**kwargs,
)
with ctx.capture(x.create) as (
status,
_stdout,
stderr,
):
self.assertTrue(expected_text in stderr)
@parameterized.parameterized.expand(
[
("dry_run_text", "[DRY-RUN]"),
("remove_msg", "Removing pyenv environment dummy-package"),
]
)
def test_PV_ENV_PYNV_200_remove_dry_run(self, name, expected_text):
x = env.PyenvEnvironment(
reqs.REQ_SCHEME_PLAIN, dry_run=True, basename="dummy-package"
)
with ctx.capture(x.remove) as (status, _stdout, stderr):
self.assertTrue(expected_text in stderr)
@parameterized.parameterized.expand(
[
("dry_run_text", "[DRY-RUN]"),
("replace_msg", "Replacing pyenv environment dummy-package"),
("remove_msg", "Removing pyenv environment dummy-package"),
("create_msg", "Creating pyenv environment dummy-package"),
("success", "==> Done."),
]
)
def test_PV_ENV_PYNV_300_replace_dry_run(self, name, expected_text):
dummy_requirements = {const.FROM_FILES: ["dummy_requirements.txt"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.PyenvEnvironment(
"dummy_req_scheme",
dry_run=True,
basename="dummy-package",
ignore_preflight_checks=True,
)
with ctx.capture(x.replace) as (status, _stdout, stderr):
self.assertTrue(expected_text in stderr)
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_210_PyenvCreate(unittest.TestCase):
def setUp(self):
self.env_name = None
try:
self.choices
except AttributeError:
self.choices = (
[chr(x) for x in range(ord("0"), ord("9") + 1)]
+ [chr(x) for x in range(ord("A"), ord("Z") + 1)]
+ [chr(x) for x in range(ord("a"), ord("z") + 1)]
)
# Random prefix for environments is required
# since pyenv virtualenv doesn't give us a choice
# to place an environment somewhere specific.
self.env_prefix = "".join(random.choice(self.choices) for x in range(10)) + "-"
def tearDown(self):
if self.env_name is not None:
# remove pyenv virtual environment
subprocess.call(
["pyenv", "virtualenv-delete", "-f", self.env_name],
stderr=subprocess.DEVNULL,
)
self.env_name = None
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_110_create(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
self.env_name = x.env_name
if not flags.should_suppress_output():
x.create()
else:
original_stderr = None
with ctx.capture_to_file(x.create) as (
_status,
_stdout,
stderr,
):
original_stderr = stderr
testable_stderr = original_stderr.lower()
if "error" in testable_stderr:
print(original_stderr, file=sys.stderr)  # real stderr; local 'stderr' is captured text
self.assertNotIn("error", testable_stderr)
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None),
("dev", reqs.REQ_SCHEME_DEV, False, None, None),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None),
]
)
def test_PV_ENV_PYNV_120_create_missing_reqs(
self, name, req_scheme, dry_run, basename, env_name
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
with ctx.project("dummy_package", dirs=dirs):
x = env.PyenvEnvironment(
req_scheme,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
self.env_name = x.env_name
with self.assertRaises(exc.MissingRequirementsError):
if not flags.should_suppress_output():
x.create()
else:
with ctx.capture_to_file(x.create) as (
_status,
_stdout,
_stderr,
):
pass
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, True),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, True),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
"dummy-env",
True,
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, "dummy-env", True),
]
)
def test_PV_ENV_PYNV_130_create_duplicate(
self, name, req_scheme, dry_run, env_name, should_raise
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
env_name=env_name,
env_prefix=env_prefix,
dry_run=False,
force=True,
)
self.env_name = x.env_name
if not flags.should_suppress_output():
x.create()
else:
with ctx.capture_to_file(x.create) as (_status, _stdout, _stderr):
pass
x = env.PyenvEnvironment(
req_scheme,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
if should_raise:
with self.assertRaises(exc.EnvExistsError):
if not flags.should_suppress_output():
x.create()
else:
with ctx.capture_to_file(x.create) as (
_status,
_stdout,
_stderr,
):
pass
else:
if not flags.should_suppress_output():
x.create()
else:
original_stderr = None
with ctx.capture_to_file(x.create) as (_status, _stdout, stderr):
original_stderr = stderr
testable_stderr = original_stderr.lower()
if "error" in testable_stderr:
print(original_stderr, file=sys.stderr)  # real stderr; local 'stderr' is captured text
self.assertNotIn("error", testable_stderr)
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_220_PyenvRemove(unittest.TestCase):
def setUp(self):
self.env_name = None
try:
self.choices
except AttributeError:
self.choices = (
[chr(x) for x in range(ord("0"), ord("9") + 1)]
+ [chr(x) for x in range(ord("A"), ord("Z") + 1)]
+ [chr(x) for x in range(ord("a"), ord("z") + 1)]
)
# Random prefix for environments is required
# since pyenv virtualenv doesn't give us a choice
# to place an environment somewhere specific.
self.env_prefix = "".join(random.choice(self.choices) for x in range(10)) + "-"
def tearDown(self):
if self.env_name is not None:
# remove pyenv virtual environment
subprocess.call(
["pyenv", "virtualenv-delete", "-f", self.env_name],
stderr=subprocess.DEVNULL,
)
self.env_name = None
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_210_remove(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
y = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=False,
force=True,
)
self.env_name = y.env_name
if not flags.should_suppress_output():
x.remove() # remove non-existent
y.create()
x.remove() # remove existing
else:
original_stderrs = []
with ctx.capture_to_file(x.remove) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
with ctx.capture_to_file(y.create) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
with ctx.capture_to_file(x.remove) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
testable_stderrs = [text.lower() for text in original_stderrs]
for (i, text) in enumerate(testable_stderrs):
if "error" in text:
print(original_stderrs[i], file=sys.stderr)  # real stderr; local 'stderr' is captured text
self.assertNotIn("error", text)
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_230_PyenvReplace(unittest.TestCase):
def setUp(self):
self.env_name = None
try:
self.choices
except AttributeError:
self.choices = (
[chr(x) for x in range(ord("0"), ord("9") + 1)]
+ [chr(x) for x in range(ord("A"), ord("Z") + 1)]
+ [chr(x) for x in range(ord("a"), ord("z") + 1)]
)
# Random prefix for environments is required
# since pyenv virtualenv doesn't give us a choice
# to place an environment somewhere specific.
self.env_prefix = "".join(random.choice(self.choices) for x in range(10)) + "-"
def tearDown(self):
if self.env_name is not None:
# remove pyenv virtual environment
subprocess.call(
["pyenv", "virtualenv-delete", "-f", self.env_name],
stderr=subprocess.DEVNULL,
)
self.env_name = None
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_310_replace_nonexistent(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
self.env_name = x.env_name
if not flags.should_suppress_output():
x.replace()
else:
original_stderrs = []
with ctx.capture_to_file(x.replace) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
testable_stderrs = [text.lower() for text in original_stderrs]
for (i, text) in enumerate(testable_stderrs):
if "error" in text:
print(original_stderrs[i], file=sys.stderr)  # real stderr; local 'stderr' is captured text
self.assertNotIn("error", text)
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_320_replace_existing(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
y = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=False,
force=True,
)
self.env_name = y.env_name
if not flags.should_suppress_output():
y.create()
x.replace()
else:
original_stderrs = []
with ctx.capture_to_file(y.create) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
with ctx.capture_to_file(x.replace) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
testable_stderrs = [text.lower() for text in original_stderrs]
for (i, text) in enumerate(testable_stderrs):
if "error" in text:
print(original_stderrs[i], file=sys.stderr)  # real stderr; local 'stderr' is captured text
self.assertNotIn("error", text)
``` |
{
"source": "jmlago/CNN_introductory_course",
"score": 3
} |
#### File: CNN_introductory_course/scripts/CNN_tf_face_challenge.py
```python
import os
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
import numpy as np
#from datetime import timedelta
home = os.path.expanduser("~")
data_folder = os.path.join(home,"Escritorio/face_recognition/fc_data")
os.chdir(data_folder)
# HYPER-PARAMETERS
batch_size = 750
num_epochs = 10
num_channels = 3
learning_r = 0.0005
seed_of_tf = 1
img_size = 60
num_steps = 150
display_step = 10
# step 1 build pre-data in regular python
filenames = [f for f in os.listdir(data_folder)]
labels = []
for el in filenames:
if "iniesta" in el:
labels.append([1,0,0])
elif "messi" in el:
labels.append([0,1,0])
else:
labels.append([0,0,1])
print("\nImage names")
print(filenames[0:32])
print("\nCorresponding Labels")
print(labels[0:32])
## Build training and validation structure
data = pd.DataFrame(filenames,columns=["Names"])
data["Label"] = labels
np.random.seed(2)
T_indexes = np.random.choice(len(filenames),int(0.8*len(filenames)),replace=False)
T_data = data.iloc()[T_indexes]
V_data = data.drop(T_indexes)
T_filenames,T_labels = T_data["Names"].tolist(),T_data["Label"].tolist()
V_filenames,V_labels = V_data["Names"].tolist(),V_data["Label"].tolist()
with tf.device("/cpu:0"):
tf.set_random_seed(seed_of_tf)
# step 2: create a dataset returning slices of `filenames`
T_dataset = tf.data.Dataset.from_tensor_slices((T_filenames,T_labels))
V_dataset = tf.data.Dataset.from_tensor_slices((V_filenames,V_labels))
# step 3: parse every image in the dataset using `map`
def _parse_function(filename,label):
#filename_print = tf.Print(filename,[filename])
image_string = tf.read_file(filename)
image_decoded = tf.image.decode_jpeg(image_string, channels=3)
image = tf.cast(image_decoded, tf.float32)
return image,label
T_dataset = T_dataset.map(_parse_function)
V_dataset = V_dataset.map(_parse_function)
#dataset = dataset.shuffle(buffer_size=10000)
T_dataset = T_dataset.batch(batch_size)
V_dataset = V_dataset.batch(1)
T_dataset = T_dataset.repeat(num_epochs)
# step 4: create iterator and final input tensor
T_iterator = T_dataset.make_initializable_iterator()
X,Y = T_iterator.get_next()
V_iterator = V_dataset.make_initializable_iterator()
X_V,Y_V = V_iterator.get_next()
with tf.Session() as sess:
sess.run(T_iterator.initializer)
if True:
checking_im,checking_l = sess.run([X,Y])
print("\nVerifying tensor shapes")
print(checking_im.shape)
print(checking_l.shape)
print("\nVerfiying image,label correspondence")
imgplot = plt.imshow(checking_im[4,:,:,1])
label = checking_l[4]
plt.show()
print(label)
def conv_net(x, n_classes, reuse, is_training):
# Define a scope for reusing the variables
with tf.variable_scope('ConvNet', reuse=reuse):
# CHALLENGE data inputs are images (60*60 pixels)
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
x = tf.reshape(x, shape=[-1, img_size, img_size, 3])
# Convolution Layer with 32 filters and a kernel size of 5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
# Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Convolution Layer with 128 filters and a kernel size of 3
conv3 = tf.layers.conv2d(conv2, 128, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv3 = tf.layers.max_pooling2d(conv3, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(conv3)
# Fully connected layer (in contrib folder for now)
fc1 = tf.layers.batch_normalization(fc1)
fc1 = tf.layers.dense(fc1, 30)#50
# Apply Dropout (if is_training is False, dropout is not applied)
#fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Output layer, class prediction
fc1 = tf.layers.batch_normalization(fc1)
out = tf.layers.dense(fc1, n_classes)
# Because 'softmax_cross_entropy_with_logits' already applies softmax,
# we only apply softmax to testing network
out = tf.nn.softmax(out) if not is_training else out
return out
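# Shape walk-through for conv_net (editorial note; assumes img_size=60 and the
# default 'valid' padding of tf.layers.conv2d):
#   60x60x3 -> conv 5x5/32 -> 56x56x32 -> pool -> 28x28x32
#   -> conv 3x3/64 -> 26x26x64 -> pool -> 13x13x64
#   -> conv 3x3/128 -> 11x11x128 -> pool -> 5x5x128
#   -> flatten (3200) -> dense 30 -> dense n_classes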
# Create a graph for training
logits_train = conv_net(X, 3, reuse=False, is_training=True)
# Create another graph for testing that reuse the same weights, but has
# different behavior for 'dropout' (not applied).
logits_Val = conv_net(X_V, 3, reuse=True, is_training=False)
# Define loss and optimizer (with train logits, for dropout to take effect)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits_train, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_r)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(logits_train, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred_V = tf.equal(tf.argmax(logits_Val, 1), tf.argmax(Y_V, 1))
accuracy_V = tf.reduce_mean(tf.cast(correct_pred_V, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
with tf.Session() as sess:
# Run the initializer
sess.run(T_iterator.initializer)
sess.run(init)
# Training cycle
for step in range(1, num_steps + 1):
try:
# Run optimization
sess.run(train_op)
except tf.errors.OutOfRangeError:
# Reload the iterator when it reaches the end of the dataset
sess.run(T_iterator.initializer)
sess.run(train_op)
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
# (note that this consumes a new batch of data)
loss, acc = sess.run([loss_op, accuracy])
print("Step " + str(step) + ", Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(acc))
print("Optimization Finished!")
# Validation cycle
sess.run(V_iterator.initializer)
acc_val = 0
for step in range(len(V_data)):
acc_val = acc_val + sess.run(accuracy_V)
print("Validation finished with an accuracy of "+str(float(acc_val)/len(V_data)))
``` |
{
"source": "jmlawrence/lawrence2",
"score": 3
} |
#### File: jmlawrence/lawrence2/Lawrence2 API.py
```python
import pandas as pd
import numpy as np
def initialize(context):
set_symbol_lookup_date('2014-07-12')
context.spy = symbol('SPY')
context.dow = symbol("DOW")
# USER SETTINGS START #
context.bear_stock = symbol('COKE') # Bear Stock
context.bull_stock = symbol('PEP') # Bull Stock
# Whether the log should output extra info
context.display_extra_info = True
# Shares to be traded per transaction
context.bear_shares = 1000
context.bull_shares = 2500
context.day = 0
# The commisions per trade
set_commission(commission.PerTrade(cost=5.00))
# USER SETTINGS END #
def handle_data(context, data):
c = context
c.data = data
coke = c.bear_stock
pepsi = c.bull_stock
coke_shares = dollars_of_shares(5000, coke, c)
pepsi_shares = dollars_of_shares(5000, pepsi, c)
c.stocks = [c.bear_stock, c.bull_stock]
normal_spread = 20.39
#### AVAILABLE FUNCTIONS START ####
# spread_of([stock A], [stock B], [normal], c)
# -> Returns how far, in percentage points, the spread between the two stocks is from the normal difference
# once_a_week(c)
# -> Returns whether it has been a week since last trade or not
# dollars_of_shares([dollar amount per trade], [stock to trade], c)
# -> Returns the number of shares the given amount of money can buy of that stock at market price
# spy_is(["up" or "down"], c)
# -> Returns whether the S&P 500 is down or up for the day
# dow_is(["up" or "down"], c)
# -> Returns whether the Dow Jones Industrial Average is down or up for the day
# time_is(["time"])
# -> Returns whether it is a given time or not
# end_of_day(c)
# -> Returns whether it is 2:59 p.m. CST
# current_price([stock], c)
# -> Returns the current price of the given stock
# opening_price([stock], c)
# -> Returns the opening price for current day of the given stock
# percent_change([stock], c)
# -> Returns the ratio of current price to opening price (1.0 = unchanged)
# close_positions(c)
# -> Closes all open market positions
# get_last_price([stock], c)
# -> Returns the stock's price from the previous minute
# average_of([sample size], ["sample denomination"], [stock], c)
# -> Returns the average price of the given stock's sample size * sample denomination
# buy([stock], c)
# -> Buys the predetermined amount of the given stock
# sell([stock], c)
# -> Sells the predetermined amount of the given stock
# cash_is(c)
# -> Returns the amount of liquid assets available in portfolio
# price_change([stock], c)
# -> Returns the percentage by which the given stock's price has changed since the last tick
# volume_is([stock], c)
# -> Returns the volume for the given stock at the moment
# next_day(c)
# -> Tells the computer that it is the next day - needed when trying to do something once a week
#### AVAILABLE FUNCTIONS END ####
# START TRADING LOOP #
if time_is("1:30"):
next_day(c)
if spread_of(coke, pepsi, normal_spread, c) < -1:
buy(coke, coke_shares, c)
sell(pepsi, pepsi_shares, c)
elif spread_of(coke, pepsi, normal_spread, c) > 1:
sell(coke, coke_shares, c)
buy(pepsi, pepsi_shares, c)
if end_of_day() and once_a_week(c):
print("2%" < "23%")
close_positions(c)
# END TRADING LOOP #
# HELPER FUNCTIONS #
def next_day(c):
c.day += 1
def dollars_of_shares(amount, stock, c):
return amount/current_price(stock, c)
def price_change(stock, c):
return abs((current_price(stock, c)/get_last_price(stock, c))-1) * 100
def spread_of(bearStock, bullStock, normal, context):
return (((current_price(bullStock, context) / current_price(bearStock, context)) - 1) * 100) - normal
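# Numeric sketch for spread_of() (illustrative prices): with the bull stock at
# 95.00 and the bear stock at 80.00, the raw spread is (95/80 - 1)*100 = 18.75,
# so with normal = 20.39 the function returns 18.75 - 20.39 = -1.64, which
# would trigger the '< -1' branch above.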
def once_a_week(context):
return (context.day % 5 == 0)  # was '== 5', which a modulo-5 result can never equal
def buy(stock, amount, c):
trade("buy", stock, amount, c)
def sell(stock, amount, c):
trade("sell", stock, amount, c)
def percent_change(stock, context):
return (current_price(stock, context) / opening_price(stock, context))
def opening_price(stock, context):
return context.data[stock].open_price
def current_price(stock, context):
return context.data[stock].price
def close_positions(context):
log.info("# START CLOSING ALL POSITIONS #")
for stock in context.stocks:
if context.portfolio.positions[stock].amount < 0:
trade("buy", stock, context.portfolio.positions[stock].amount, context)
elif context.portfolio.positions[stock].amount > 0:
trade("sell", stock, context.portfolio.positions[stock].amount, context)
log.info("# ALL POSITIONS CLOSED #")
def end_of_day():
return time_is("2:59")
def spy_is(string, c):
if string.lower() == "up":
return c.data[c.spy].open_price < c.data[c.spy].price
else:
return c.data[c.spy].open_price > c.data[c.spy].price
def dow_is(string, c):
if string.lower() == "up":
return c.data[c.dow].open_price < c.data[c.dow].price
else:
return c.data[c.dow].open_price > c.data[c.dow].price
def get_last_price(stock, c):
hist = history(bar_count=3, frequency='1m', field='price')
return hist[stock][-3]
def average_of(num, timeInterval, stock, context):
timeInterval = timeInterval.lower()
if timeInterval == "day" or timeInterval == "days":
if num == 1:
hist = history(bar_count=1, frequency='1d', field='price')
elif num == 7 or num == "week":
hist = history(bar_count=7, frequency='1d', field='price')
elif num == 30 or num == "month":
hist = history(bar_count=30, frequency='1d', field='price')
elif num == 365 or num == "year":
hist = history(bar_count=365, frequency='1d', field='price')
elif timeInterval == "hour" or timeInterval == "hours":
if num == 1:
hist = history(bar_count=60, frequency='1m', field='price')
elif num == 2:
hist = history(bar_count=120, frequency='1m', field='price')
elif num == 3:
hist = history(bar_count=180, frequency='1m', field='price')
elif num == 4:
hist = history(bar_count=240, frequency='1m', field='price')
elif timeInterval == "minutes" or timeInterval == "minute":
if num == 1:
hist = history(bar_count=2, frequency='1m', field='price')
elif num == 5:
hist = history(bar_count=5, frequency='1m', field='price')
elif num == 10:
hist = history(bar_count=10, frequency='1m', field='price')
elif num == 25:
hist = history(bar_count=25, frequency='1m', field='price')
elif num == 50:
hist = history(bar_count=50, frequency='1m', field='price')
elif num == 100:
hist = history(bar_count=100, frequency='1m', field='price')
elif num == 180:
hist = history(bar_count=180, frequency='1m', field='price')
return hist.mean()[stock]
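# Usage sketch for average_of() (editorial note): average_of(5, "minutes",
# stock, c) averages the last five 1-minute price bars; combinations not
# listed above (e.g. num=2 with "minutes") leave `hist` unset and raise
# UnboundLocalError.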
def volume_is(stock, context):
return context.data[stock].volume
def cash_is(context):
return context.portfolio.cash
def time_is(s):
currentTime = get_datetime()
hourAndMinute = parseTime(s)
currentHour = str((currentTime.hour - 5) % 12)
currentHour = "12" if currentHour == "0" else currentHour
currentMin = str(currentTime.minute)
currentMin = checkDigits(currentMin)
return (hourAndMinute[0] == currentHour and hourAndMinute[1] == currentMin)
def parseTime(s):
minute = s[-2:]
hour = s[:-3]
return [hour, minute]
def displayTime():
time = get_datetime()
currentHour = str((time.hour - 5) % 12)
currentHour = "12" if currentHour == "0" else currentHour
currentMin = checkDigits(time.minute)
return(currentHour + ":" + currentMin)
def display_info(context, stock, data):
avg = average_of(1, "day", stock, context)
content = "Current Price > AVG" if (avg < context.data[stock].price) else "Current Price < AVG"
log.info("AVG: {0} | Current Price: ${1} | {2} | VOL: {3}".format(
avg, context.data[stock].price, content, context.data[stock].volume))
def checkDigits(num):
s = str(num)
if len(s) == 1:
return ("0" + s)
else:
return s
def getAmOrPm():
if get_datetime().hour < 17:
return "a.m."
else:
return "p.m."
def trade(command, stock, amount, c):
if amount != 0 and amount is not None:
shares = abs(amount)
data = c.data
if command.lower() == "buy":
order(stock, shares)
else:
order(stock, -shares)
log.info("Placing an order to {0} {1} shares at {2} {3} at ${4} per share.".format(
command.upper(), shares, displayTime(), getAmOrPm(), data[stock].price))
if c.display_extra_info:
display_info(c, stock, data)
``` |
{
"source": "Jmlazaro2599/webapp1",
"score": 3
} |
#### File: Jmlazaro2599/webapp1/appy.py
```python
import os
from flask import Flask, url_for, render_template, request
app = Flask(__name__)
@app.route("/")
def render_main():
return render_template('home.html')
@app.route('/ctof')
def render_ctof():
return render_template('ctof.html')
def ctof(ctemp):
return 9/5*ctemp+32
@app.route('/ftoc')
def render_ftoc():
return render_template('ftoc.html')
def ftoc(ftemp):
return (ftemp-32.0)*(5.0/9.0)
@app.route('/mtokm')
def render_mtokm():
return render_template('mtokm.html')
def mtokm(miles):
return miles*1.60934
@app.route('/ctof_result')
def render_ctof_result():
try:
ctemp_result = float(request.args['cTemp'])
ftemp_result = ctof(ctemp_result)
return render_template('ctof_result.html', cTemp=ctemp_result, fTemp=ftemp_result)
except ValueError:
return "Sorry: something went wrong."
@app.route('/ftoc_result')
def render_ftoc_result():
try:
ftemp_result = float(request.args['fTemp'])
ctemp_result = ftoc(ftemp_result)
return render_template('ftoc_result.html', fTemp=ftemp_result, cTemp=ctemp_result)
except ValueError:
return "Sorry: something went wrong"
@app.route('/mtokm_result')
def render_mtokm_result():
try:
miles_result = float(request.args['miles'])
km_result = mtokm(miles_result)
return render_template('mtokm_result.html', miles=miles_result, km=km_result)
except ValueError:
return "Sorry: something went wrong."
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(port=port)
``` |
{
"source": "jmlazaro25/ldmx-bdt",
"score": 2
} |
#### File: jmlazaro25/ldmx-bdt/mainframe.py
```python
from os import path
import mods.ROOTmanager as manager
import mods.configuration as config
import ROOT.gSystem as ROOTgSystem
#ROOTgSystem.Load('/nfs/slac/g/ldmx/users/${USER}/ldmx-sw/install/lib/libFramework.so')
ROOTgSystem.Load('libFramework.so')
def main():
# Parse command line args and init most important ones
pdict = config.parse()
action_str = pdict['action']
configFile = pdict['config']
if action_str == 'trees': actor = BdtTreeMaker
elif action_str == 'train': actor = BdtTrainer
elif action_str == 'eval': actor = BdtEval
else: quit('\nProvide a valid action')
print('\nUsing {} action from conf file: {}'.format(action_str,configFile))
# If batch, just do the thing and exit
if pdict['batch'] and action_str != 'train':
actor( config.parse_batch_config(pdict) ).run()
quit('\nDONE!')
# Parse and print config (overrides parse options if provided in both)
# Maybe give an overriding message in these cases
proc_confs = config.parse_bdt_config(
action_str,
configFile,
clargs = pdict
)
# Wait for confirmation
if not pdict['batch']:
input('\nPress Enter to continue... ( Also hi, have a good day :D )')
# Construct processes
# Mayhaps multiprocessing for this?
procs = []
for proc_config in proc_confs:
procs.append( actor( proc_config ) )
# Process processes
for proc in procs:
# RUN
proc.run(
strEvent = pdict['startEvent'],
maxEvents = pdict['maxEvents'],
pfreq = 1000
)
del proc # Done with process, remove from memory
print('\nU DONE GOOD UWU !\n')
##################################################
# "Actors"
##################################################
class BdtTreeMaker(manager.TreeProcess):
"""
Make flat trees to train on
Consider multiple inheritance later
"""
def __init__(self, proc_conf):
""" Set up TreeMaker, define event_proces, and init TreeProccess """
# Set up tfMaker to have branches_info
self.tfMaker = manager.TreeMaker(proc_conf.tConfig)
# Get set of branches
branches = set()
for fun_info in proc_conf.tConfig.funcs.values():
for br_tup in fun_info['brs']:
branches.add(br_tup)
# Init parent TreeProcess (sets attrs used in next funcs loop)
super().__init__(
proc_conf.pConfig,
branches = branches,
endfs=[ self.tfMaker.wq ]
)
# Get lists of dicts containing funcs, args, and priorities
# for use in appropriate event_process sections
func_groups = {
'init': ( 0, 2),
'closing': (40,50)
}
dets = ('tracker', 'ecal', 'hcal')
steps = ('_init', '_l1', '_med', '_l2', '_closing')
for det in dets:
for step in steps:
func_groups[ det + step ] =\
(10*(dets.index(det)+1) + steps.index(step),
10*(dets.index(det)+1) + steps.index(step) + 1)
# Begin with a list containing a dummy entry to simplify the appending condition
for g in func_groups:
setattr(self, g, [{'func': None}])
# Check each function
for fun, info in proc_conf.tConfig.funcs.items():
# Against func_groups and determine which it belongs in
for lab, lims in func_groups.items():
if lims[0] <= info['priority'] < lims[1]:
g = lab
break
# If it's new, add its info to its group
if fun not in [ f['func'] for f in getattr(self,g) ]:  # compare against stored funcs, not their dicts
getattr(self,g).append( {
'func': fun,
'args': { tup[0]: getattr(self, tup[0]) \
for tup in info['brs'] },
'priority': info['priority']
}
)
# Sort function groups in case of an internal hierarchy, then pop prio
for g in func_groups:
getattr(self,g).pop(0)
if getattr(self,g) != []:
getattr(self,g).sort(key = lambda d: d['priority'])
for fund in getattr(self,g):
fund.pop('priority')
def doFeatFuncs(f_dict, funcs_list, store_dict={}, lq=None):
""" Short function for looping over others in funcs lists """
for fun in funcs_list:
fun['func']( f_dict, fun['args'], store_dict, lq)
def doDetector(f_dict, det, lb_prefix, d_store):
""" Do all doFeatFuncs steps for a given detector """
# Init loop items
doFeatFuncs(f_dict, getattr(self, det + '_init'), d_store)
if getattr(self, det + '_l1') != []:
# Get input branch name
for d in getattr(self, det + '_l1'):
for br in d['args']:
if br[:len(lb_prefix)] == lb_prefix: loop_branch = br
# Loop over said branch while doing lfs
for hit in getattr(self, loop_branch):
doFeatFuncs(
f_dict,
getattr(self, det + '_l1'),
d_store,
hit
)
# Do any intermediary functions
doFeatFuncs(f_dict, getattr(self, det + '_med'), d_store)
# Loop again if needed
if getattr(self, det + '_l2') != []:
for hit in getattr(self, loop_branch):
doFeatFuncs(
f_dict,
getattr(self, det + '_l2'),
d_store,
hit
)
# Any further det functions
doFeatFuncs(f_dict, getattr(self, det + '_closing'), d_store)
# Main event algorithm
def event_process():
""" Algorithm for computing and storing all features """
# Initialize BDT input variables w/ defaults
feats = self.tfMaker.resetFeats()
# Copy from input and other basic assignments
g_store = {'ebeam': proc_conf.pConfig.ebeam}
doFeatFuncs(feats, self.init, g_store)
# Tell detectors about info so far
tracker_store = {'globals': g_store}
ecal_store = tracker_store.copy()
hcal_store = tracker_store.copy()
# Tracker
doDetector(feats, 'tracker', 'TrackerRecHits', tracker_store) # ?
# Ecal
doDetector(feats, 'ecal', 'EcalRecHits', ecal_store)
# Hcal
doDetector(feats, 'hcal', 'HcalRecHits', hcal_store)
# Any final closing functions
doFeatFuncs(feats, self.closing)
self.tfMaker.fillEvent(feats)
# Tell self about event_process
setattr(self, 'event_process', event_process)
# Light-hearted attempt to save on memory
del branches
del dets
del steps
del func_groups
class BdtTrainer():
""" Train a BDT """
def __init__(self, conf_dict):
""" Set up BDT parameters """
# Print config
config.print_dict(conf_dict, prefix='\nBDT configuration:')
# Set config items as attrs
for k,v in conf_dict.items():
setattr(self, k, v)
# Warning if things haven't been hadded
# Could use normal TChain method (as suggested by use of 'dir') but nah
# Forcing hadding is good in case things crash anyway
self.sets = ('bkg', 'sig')
for st in self.sets:
thingy = getattr(self, f'{st}_indir')
if not path.exists(thingy):
quit(
f'{getattr(self, f"{st}_indir")} does NOT exist'
+ 'Lemme help, try:\n'
+ f'ldmx hadd {thingy} '
+ f'{ "/".join(thingy.split("/")[:-1]) }/{{{st}}}/*'
+ '\nand then try me again'
)
# Yet another conf dictionary
self.params_dict = {
"objective": "binary:logistic",
"eta": self.eta,
"max_depth": self.tree_depth,
"min_child_weight": 20,
"silent": 1,
"subsample": .9,
"colsample_bytree": .85,
"eval_metric": 'error',
"seed": 1,
"nthread": 1,
"verbosity": 1,
"early_stopping_rounds" : 10
}
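# Editorial note: in xgboost, 'early_stopping_rounds' is a keyword argument
# to xgb.train() (and requires an evals list), not a booster parameter, so
# listing it in params_dict has no effect on the train() call below.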
def run(self, strEvent=None, maxEvents=1.25e6 , pfreq=None):
""" Run the traning - startEvent and pfreq are placeholders """
# import some stuff
import os
import logging
import numpy as np
import pickle as pkl
import xgboost as xgb
import matplotlib as plt
# Seed and logging
np.random.seed( int(self.seed) )
ml_logger = logging.getLogger('matplotlib')
ml_logger.setLevel(logging.WARNING)
plt.use('Agg')
# Organize data for training
for st in self.sets:
# Load tree
tree = manager.load(
[getattr(self, '{}_indir'.format(st))],
self.tree_name
)
events = []
for event in tree:
if len(events) == maxEvents: break
events.append(
[ getattr(event, feat) for feat in self.branches ]
)
events = np.array(events)
new_idx = np.random.permutation(
np.arange( np.shape(events)[0] )
)
np.take(events, new_idx, axis = 0, out=events)
setattr(self, '{}_train_x'.format(st), events)
setattr(self,
'{}_train_y'.format(st),
np.zeros(
len(
getattr( self, '{}_train_x'.format(st) )
)
) + (st == 'sig')
)
# Combine data
train_x = np.vstack(( self.sig_train_x, self.bkg_train_x ))
train_y = np.append( self.sig_train_y, self.bkg_train_y )
train_x[ np.isnan( train_x ) ] = 0.000
train_y[ np.isnan( train_y ) ] = 0.000
training_matrix = xgb.DMatrix(train_x, train_y)
# Actual training
gbm = xgb.train(
self.params_dict,
training_matrix,
int(self.tree_number)
)
# Store BDT
outname = self.outdir.split('/')[-1]
if not os.path.exists(self.outdir):
print( 'Creating %s' % (self.outdir) )
os.makedirs(self.outdir)
output = open('{}/{}_weights.pkl'.format(self.outdir, outname), 'wb')
pkl.dump(gbm, output)
# Plot feature importances
xgb.plot_importance(gbm)
        plt.savefig(
'{}/{}_fimportance.png'.format(self.outdir, outname), # png name
dpi=500, bbox_inches='tight', pad_inches=0.5 # png parameters
)
        # Announce save location
print('Files saved in: {}'.format(self.outdir))
class BdtEval(manager.TreeProcess):
""" Evaluate BDT on reserved flat trees """
def __init__(self, proc_conf):
# Set up tfMaker to have branches_info
self.tfMaker = manager.TreeMaker(proc_conf.tConfig)
# Init parent TreeProcess (sets attrs used in next funcs loop)
super().__init__(
proc_conf.pConfig,
endfs=[ self.tfMaker.wq ]
)
# import some stuff
import numpy as np
import pickle as pkl
import xgboost as xgb
from mods.feats import trees_info_analysis
# Pretty much always want to pass this for bias plots
# Could be handled better, but skipping the config is nice
analysis_vars = tuple( trees_info_analysis.keys() )
# Set bdt
self.bdt = pkl.load( open( proc_conf.pConfig.bdt, 'rb' ) )
# Store discValue name
for k in self.tfMaker.branches_info.keys():
if k[:9] == 'discValue': discValue_name = k
# Don't try to include these in the list given to self.bdt.predict
no_eval = (*analysis_vars, discValue_name)
# Main event algorithm
def event_process():
# Collect features from this event
feats = []
for feat in self.tfMaker.branches_info:
if feat in no_eval: continue
feats.append( getattr( self.tree, feat ) )
# Copy existing variables to new tree
for f_name in self.tfMaker.branches_info.keys():
if f_name == discValue_name: continue
self.tfMaker.branches[f_name][0] = getattr( self.tree, f_name )
# Add prediction to new tree
evtarray = np.array([feats])
self.tfMaker.branches[discValue_name][0] = \
float( self.bdt.predict( xgb.DMatrix(evtarray) )[0] )
# Fill new tree with current event values
self.tfMaker.tree.Fill()
# Tell self about event_process
setattr(self, 'event_process', event_process)
if __name__ == '__main__': main()
```
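A minimal standalone sketch of the stack-label-shuffle pattern used in `BdtTrainer.run()` above, with toy feature arrays standing in for the flat-tree branches (shapes and hyperparameters here are illustrative, not the trainer's):
```python
import numpy as np
import xgboost as xgb

# Toy stand-ins for the per-class feature arrays built in BdtTrainer.run()
rng = np.random.default_rng(1)
sig_x = rng.normal(1.0, 1.0, size=(100, 4))  # "signal" rows
bkg_x = rng.normal(0.0, 1.0, size=(100, 4))  # "background" rows

# Stack features, build 1/0 labels, and zero out NaNs as run() does
train_x = np.vstack((sig_x, bkg_x))
train_y = np.append(np.ones(len(sig_x)), np.zeros(len(bkg_x)))
train_x[np.isnan(train_x)] = 0.0

# Shuffle rows and labels with the same permutation (run() shuffles per class)
idx = rng.permutation(len(train_x))
train_x, train_y = train_x[idx], train_y[idx]

gbm = xgb.train({'objective': 'binary:logistic'}, xgb.DMatrix(train_x, train_y), 10)
```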
#### File: ldmx-bdt/triggerStuff/treeMaker.py
```python
import os
import math
import ROOT as r
import numpy as np
from mods import ROOTmanager as manager
from mods import physTools, mipTracking
cellMap = np.loadtxt('mods/cellmodule.txt')
r.gSystem.Load('libFramework.so')
maxLayer = 20
# TreeModel to build here
branches_info = {
f'energy_{llayer + 1}': {'rtype': float, 'default': 0.} \
for llayer in range(maxLayer)
}
branches_info['energy_tot'] = {'rtype': float, 'default': 0.}
def main():
# Inputs and their trees and stuff
pdict = manager.parse()
batch_mode = pdict['batch']
separate = pdict['separate']
inlist = pdict['inlist']
outlist = pdict['outlist']
group_labels = pdict['groupls']
startEvent = pdict['startEvent']
maxEvents = pdict['maxEvents']
# Should maybe put in parsing eventually and make event_process *arg
# Construct tree processes
procs = []
for gl, group in zip(group_labels,inlist):
procs.append( manager.TreeProcess(event_process, group,
ID=gl, batch=batch_mode, pfreq=1000) )
# Process jobs
for proc in procs:
# Move into appropriate scratch dir
os.chdir(proc.tmp_dir)
# Branches needed
proc.ecalRecHits = proc.addBranch('EcalHit', 'EcalRecHits_v12')
# Tree/Files(s) to make
print('\nRunning %s'%(proc.ID))
proc.tfMakers = {'unsorted': None}
for tfMaker in proc.tfMakers:
            proc.tfMakers[tfMaker] = manager.TreeMaker(
                group_labels[procs.index(proc)] + '_{}.root'.format(tfMaker),
                "EcalVeto",
                branches_info,
                outlist[procs.index(proc)]
            )
# Gets executed at the end of run()
proc.extrafs = [ proc.tfMakers[tfMaker].wq for tfMaker in proc.tfMakers ]
# RUN
proc.run(strEvent=startEvent, maxEvents=maxEvents)
# Remove scratch directory if there is one
if not batch_mode: # Don't want to break other batch jobs when one finishes
manager.rmScratch()
print('\nDone!\n')
# Process an event
def event_process(self):
# Initialize BDT input variables w/ defaults
feats = next(iter(self.tfMakers.values())).resetFeats()
# Add energies
for hit in self.ecalRecHits:
if hit.getEnergy() < 0: continue
feats['energy_tot'] += hit.getEnergy()
for llayer in range(maxLayer):
if physTools.ecal_layer(hit) <= llayer:
feats[f'energy_{llayer + 1}'] += hit.getEnergy()
self.tfMakers['unsorted'].fillEvent(feats)
if __name__ == "__main__":
main()
``` |
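The layer loop in `event_process` builds cumulative sums: `energy_N` holds the energy deposited in layers 0 through N-1. A small numpy sketch of the same bookkeeping, with made-up hit layers and energies:
```python
import numpy as np

max_layer = 20
# Made-up (layer, energy) pairs standing in for EcalRecHits
hit_layers = np.array([0, 0, 1, 3, 3, 7, 19])
hit_energies = np.array([5.0, 2.0, 1.5, 4.0, -0.5, 3.0, 0.7])

good = hit_energies >= 0  # skip negative-energy hits, as event_process does
per_layer = np.bincount(hit_layers[good], weights=hit_energies[good],
                        minlength=max_layer)
feats = {f'energy_{n + 1}': per_layer[:n + 1].sum() for n in range(max_layer)}
feats['energy_tot'] = per_layer.sum()
print(feats['energy_1'], feats['energy_20'], feats['energy_tot'])
```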
{
"source": "jmlazaro25/vissig",
"score": 2
} |
#### File: mathematica/get_ldmx_acceptance_to_share/banner.py
```python
from __future__ import division
import copy
import logging
import numbers
import os
import sys
import re
import math
#dict
class Banner(dict):
""" """
ordered_items = ['mgversion', 'mg5proccard', 'mgproccard', 'mgruncard',
'slha', 'mggenerationinfo', 'mgpythiacard', 'mgpgscard',
'mgdelphescard', 'mgdelphestrigger','mgshowercard','run_settings']
capitalized_items = {
'mgversion': 'MGVersion',
'mg5proccard': 'MG5ProcCard',
'mgproccard': 'MGProcCard',
'mgruncard': 'MGRunCard',
'mggenerationinfo': 'MGGenerationInfo',
'mgpythiacard': 'MGPythiaCard',
'mgpgscard': 'MGPGSCard',
'mgdelphescard': 'MGDelphesCard',
'mgdelphestrigger': 'MGDelphesTrigger',
'mgshowercard': 'MGShowerCard' }
def __init__(self, banner_path=None):
""" """
if isinstance(banner_path, Banner):
dict.__init__(self, banner_path)
self.lhe_version = banner_path.lhe_version
return
else:
dict.__init__(self)
if banner_path:
self.read_banner(banner_path)
############################################################################
# READ BANNER
############################################################################
    pat_begin=re.compile(r'<(?P<name>\w*)>')
    pat_end=re.compile(r'</(?P<name>\w*)>')
tag_to_file={'slha':'param_card.dat',
'mgruncard':'run_card.dat',
'mgpythiacard':'pythia_card.dat',
'mgpgscard' : 'pgs_card.dat',
'mgdelphescard':'delphes_card.dat',
'mgdelphestrigger':'delphes_trigger.dat',
'mg5proccard':'proc_card_mg5.dat',
'mgproccard': 'proc_card.dat',
'init': '',
'mggenerationinfo':'',
'scalesfunctionalform':'',
'montecarlomasses':'',
'initrwgt':'',
'madspin':'madspin_card.dat',
'mgshowercard':'shower_card.dat',
'run_settings':''
}
def read_banner(self, input_path):
"""read a banner"""
if isinstance(input_path, str):
            if input_path.find('\n') == -1:
input_path = open(input_path)
else:
def split_iter(string):
return (x.groups(0)[0] for x in re.finditer(r"([^\n]*\n)", string, re.DOTALL))
input_path = split_iter(input_path)
text = ''
store = False
for line in input_path:
if self.pat_begin.search(line):
if self.pat_begin.search(line).group('name').lower() in self.tag_to_file:
tag = self.pat_begin.search(line).group('name').lower()
store = True
continue
if store and self.pat_end.search(line):
if tag == self.pat_end.search(line).group('name').lower():
self[tag] = text
text = ''
store = False
if store:
if line.endswith('\n'):
text += line
else:
text += '%s%s' % (line, '\n')
#reaching end of the banner in a event file avoid to read full file
if "</init>" in line:
break
elif "<event>" in line:
break
def __getattribute__(self, attr):
"""allow auto-build for the run_card/param_card/... """
try:
return super(Banner, self).__getattribute__(attr)
        except AttributeError:
if attr not in ['run_card', 'param_card', 'slha', 'mgruncard', 'mg5proccard', 'mgshowercard', 'foanalyse']:
raise
return self.charge_card(attr)
```
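A short usage sketch for the class above; the filename is a placeholder for any LHE file whose banner carries the blocks listed in `tag_to_file`:
```python
# Hypothetical usage: pull the run card text out of an LHE banner
banner = Banner('unweighted_events.lhe')  # placeholder path
if 'mgruncard' in banner:
    run_card = banner['mgruncard']        # raw text of run_card.dat
    print(run_card.splitlines()[0])
```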
#### File: mathematica/get_ldmx_acceptance_to_share/detector_hit_conditions_visible_decay.py
```python
import numpy as np
import collections
def _xv_from_uni(xi,zmax,gct):
"""
Generate a z vertex displacement from a uniform random variable
both zmax and gct should be in cm
"""
if xi > 0.:
return gct*np.log(xi) + zmax
else:
return -100.*zmax
xv_from_uni = np.vectorize(_xv_from_uni)
def _det_hit_condition(_pv, _pl, det_rad, zmax, xv, Ethr=1.):
"""
returns true if lepton hits a circular detector of radius det_rad,
if it originates from a vector that decays a distance xv from the detector
pv = relativistic 4 vector momentum
pl = relativistic 4 lepton momentum
det_rad = detector radius in cm
xv = z distance of the vector decay vertex from the detector in cm
"""
#Ethr = 1. # E137 Ecal detector threshold energy
pv = np.array(_pv)
pl = np.array(_pl)
# xv < 0 corresponds to decays beyond the detector
if xv < 0:
return False
#print pv
pvt = pv[1:3]
pvz = pv[3]
plt = pl[1:3]
plz = pl[3]
# transverse displacement of vector when it decays
#print xv#,(zmax-xv)*pvt,pvz
vec_delta = (zmax-xv)*pvt/pvz
#print pvt/pvz, np.linalg.norm(vec_delta)
# point at which lepton momentum crosses the detector x-y plane
rvec = vec_delta + xv*plt/plz
#print rvec, np.sqrt(np.dot(rvec,rvec))
return (pl[0] >= Ethr) and (np.dot(rvec,rvec) < (det_rad)**2.)
# Vectorized version of the above
def det_hit_condition(pv, pl, det_rad, zmax, xvs, Ethr=1.):
    if not isinstance(xvs, collections.abc.Iterable):
xvs = np.array([xvs])
return np.array([_det_hit_condition(pv, pl, det_rad, zmax, xv, Ethr) for xv in xvs])
```
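A sketch of driving the two helpers above end to end: draw uniform variates, map them to decay-vertex positions, and estimate the acceptance. The 4-vectors and geometry below are invented numbers in the file's conventions ((E, px, py, pz) in GeV, lengths in cm):
```python
import numpy as np

pv = [8.0, 0.05, 0.02, 7.9]  # parent vector 4-momentum (toy values)
pl = [4.0, 0.03, 0.01, 3.9]  # daughter lepton 4-momentum (toy values)
zmax, det_rad, gct = 315.0, 20.0, 100.0  # assumed shield length, radius, gamma*c*tau

xis = np.random.uniform(0.0, 1.0, size=1000)
xvs = xv_from_uni(xis, zmax, gct)  # decay-vertex z distances from the detector
hits = det_hit_condition(pv, pl, det_rad, zmax, xvs)
print('acceptance ~', hits.mean())
```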
#### File: stats/old/reformat_0.py
```python
import re
import os
import decimal
import argparse
import numpy as np
from sympy import Symbol
from scipy import interpolate
from sympy.stats import sample, Uniform, Exponential
def main():
""" Add interactivity for LHE analysis """
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-m', action='store', dest='mAp', type=float)
parser.add_argument('-e', action='store', dest='eps', type=float)
parser.add_argument('-i', action='store', dest='infile',)
args = parser.parse_args()
mAp, eps = args.mAp, args.eps
rsample_factor = 10
# Will store information here
nEvents = 10_000
resamples_allowed = 500
resamples = np.zeros( nEvents )
decay_zs = [] # Only those within bounds on first sample
decay_zs_resampled = [] # Within bounds after < resamples_allowed
positions = np.zeros( (nEvents, 3) )
report = {
'Resampled events (normalized)': 0,
'Average resamples': 0,
'Max resamples': 0
}
##################################################
# Analysis (similar to writeBremDecay, but dev-y as opposed to pro-y)
##################################################
# Creation XYZ
Sym = Symbol('q')
x_rv = Uniform(Sym, -10 , 10 )
y_rv = Uniform(Sym, -40 , 40 )
#z_rv = Uniform(Sym, -0.175, 0.175)
Xs = sample( x_rv, numsamples=nEvents, seed=np.random.seed( 2 ) )
Ys = sample( y_rv, numsamples=nEvents, seed=np.random.seed( 2 ) )
#Zs = sample( z_rv, numsamples=nEvents, seed=np.random.seed( 2 ) )
# Detector limits
zmin = 300
zmax = 4000 - 300
# Decay time
c_speed = 299_792_458_000 # mm/s
t = Symbol('t')
decay_width = gamma_ap_tot(mAp, eps)
tau = 6.582e-25 / decay_width # hbar = 6.582e-25 GeV*s
decay_rv = Exponential(t, 1/tau)
decay_t = sample(
decay_rv,
numsamples=nEvents*rsample_factor,
seed=np.random.seed( 2 )
)
# Open file for analysis
with open(args.infile, 'r') as ogfile:
# Skip from header/init
for line in ogfile:
if line == '</init>\n':
break
##################################################
# events
##################################################
event_num = 0
event_line = 0
current_line = 0
for line in ogfile: # Picks up where last loop leaves off
current_line += 1
# Scale relevant lines
if line == '<event>\n':
event_num += 1
event_line = 0
if event_num % 1000 == 0:
print( 'Reformatting event: {}'.format(event_num) )
else: event_line += 1
if event_line == 6: # Take note of Ap info for projection
line = rescaleLine(line, tokens=range(6,11))
px,py,pz,en = [
float(v) for v in numsInLine(line)[6:10]
]
Ap_3mom = np.array((px,py,pz))
""" Might care about these later
elif event_line < 9: # decay electrons
# Null parents
line = replaceNums(line, [2,3], [-1,-1])
"""
# Skip mgrwt. add appropriate vertex, and end event
if event_line == 16 :
                # Add vertices
#x,y,z,t = next(Xs), next(Ys), next(Zs), next(decay_t)*(en/mAp)
x,y,z = next(Xs), next(Ys), 0
c_vertex = np.array( (x,y,z) )
t = -1 # While prep
d_vertex = c_vertex + Ap_3mom*c_speed / mAp * t
resamps = -1
while not ( zmin <= d_vertex[2] <= zmax ):
resamps += 1
"""
if resamps == 50:
print(f'50 samples on event {event_num}: {Ap_3mom}')
"""
if resamps == resamples_allowed:
#print(f'500 samples on event {event_num}; p = {Ap_3mom}')
break
try:
t = next(decay_t)*(en/mAp)
d_vertex = c_vertex + Ap_3mom*c_speed / mAp * t
except: # ran out of decay_t
# Fill report
report['Resampled events (normalized)'] \
= np.count_nonzero(resamples) / event_num
report['Average resamples'] \
= np.average(resamples)*nEvents / event_num
report['Max resamples'] = max(resamples)
report['Events before quitting'] = event_num
# Print report
print(resamples[:event_num])
for k,v in report.items():
print( '{}: {}'.format(k,v) )
quit()
if resamps == 0:
decay_zs.append(d_vertex[2])
elif resamps < resamples_allowed:
decay_zs_resampled.append(d_vertex[2])
resamples[event_num-1] = resamps
# Fill report
report['Resampled events (normalized)'] \
= np.count_nonzero(resamples) / nEvents
report['Average resamples'] = np.average(resamples)
report['Max resamples'] = max(resamples)
# Print report
for k,v in report.items():
print( '{}: {}'.format(k,v) )
# Plot resamples
"""
import matplotlib.pyplot as plt
plt.hist(resamples)
plt.title(r"$m_{A'} =$" + str(mAp) + r" GeV, $\epsilon$ = " + str(eps))
# Plot decay_zs, decay_zs, and combo
#plt.figure()
fig, (ax1, ax2) = plt.subplots(nrows=2)
ns, bins, p = ax1.hist(
[decay_zs,decay_zs_resampled],
histtype='step',
log=True,
bins=50,
range = (zmin,zmax),
density=True,
label=['decay_zs','decay_zs_resampled']
)
plt.bar( bins[:-1 ] , ns[1] / ns[0], label='nolog', width=68)
plt.bar( bins[:-1 ] , np.log(ns[1]/ns[0]) / np.log(10),
label='log', width=68 )
plt.legend(loc='upper center')
plt.ylabel(r'A.U.')
plt.xlabel(r'Decay Z [mm]')
plt.hist(
decay_zs_resampled,
histtype='step',
log=True,
bins=50,
range = (zmin,zmax),
density=True,
label='decay_zs_resampled'
)
plt.legend(loc='upper center')
plt.ylabel(r'A.U.')
plt.xlabel(r'Decay Z [mm]')
plt.hist(
decay_zs + decay_zs_resampled,
histtype='step',
log=True,
bins=50,
range = (zmin,zmax),
density=True,
label='Combination'
)
plt.legend(loc='upper right')
plt.ylabel(r'A.U.')
plt.xlabel(r'Decay Z [mm]')
plt.title(r"$m_{A'} =$" + str(mAp) + r" GeV, $\epsilon$ = " + str(eps))
plt.show()
"""
def writeBremDecay(lhe, mAp, eps, seed, outdir=None, nevents=10_000):
""" Break A'->ee LHEs into brem and decay files and reformat/rescale """
# Write to input directory if outdir not provided
if outdir is None: outdir = '/'.join( lhe.split('/')[:-1] )
# Create outdir if needed
if not os.path.exists(outdir): os.makedirs(outdir)
# Outfile names
genname = lhe.split('/')[-1].split('.lhe')[0] \
+ '_run{}'.format(seed) \
+ '_eps{}'.format(eps)
bremfile = '{}/{}_brem.lhe'.format(outdir,genname)
decayfile = '{}/{}_decay.lhe'.format(outdir,genname)
print( 'Reformatting:\n{}\nInto:\n{}\n{}'.format(lhe,bremfile,decayfile) )
# Creation XYZ
Sym = Symbol('q')
x_rv = Uniform(Sym, -10 , 10 )
y_rv = Uniform(Sym, -40 , 40 )
#z_rv = Uniform(Sym, -0.175, 0.175)
Xs = sample( x_rv, numsamples=nevents, seed=np.random.seed( seed ) )
Ys = sample( y_rv, numsamples=nevents, seed=np.random.seed( seed ) )
#Zs = sample( z_rv, numsamples=nevents, seed=np.random.seed( seed ) )
# Detector limits
zmin = 300
zmax = 4000 - 300
# Decay time
c_speed = 299_792_458_000 # mm/s
t = Symbol('t')
decay_width = gamma_ap_tot(mAp, eps)
tau = 6.582e-25 / decay_width # hbar = 6.582e-25 GeV*s
decay_rv = Exponential(t, 1/tau)
decay_t = sample(
decay_rv,
numsamples=nevents,
seed=np.random.seed( seed )
)
# Open original and output files
with open(lhe, 'r') as ogfile, \
open(bremfile, 'w') as bremf, \
open(decayfile, 'w') as decayf:
##################################################
        # Edit header (technically everything until </init>)
        # Many of these conditions needn't be checked in the events section
##################################################
scaling_mass = False
for line in ogfile:
# ebeams
if re.search(r'ebeam',line):
line = rescaleLine(line)
# Masses
if line[:10] == 'BLOCK MASS':
scaling_mass = True # Indicate following lines should be scaled
continue
if line[0] == '#':
scaling_mass = False
if scaling_mass:
line = rescaleLine(line, tokens=[1])
# Decay Width
if re.match(r'DECAY +622', line):
line = replaceNums(line, [1], [decay_width])
# Break from header/init
if line == '</init>\n':
bremf.write(line)
decayf.write(line)
break
bremf.write(line)
decayf.write(line)
##################################################
# Edit events
##################################################
event_num = 0
event_line = 0
current_line = 0
for line in ogfile: # Picks up where last loop leaves off
current_line += 1
# Scale relevant lines
if line == '<event>\n':
event_num += 1
event_line = 0
decayf.write(line)
if event_num % 1000 == 0:
print( 'Reformatting event: {}'.format(event_num) )
else: event_line += 1
if 1 < event_line < 9:
line = rescaleLine(line, tokens=range(6,11))
# Event info line
if event_line ==1:
# Correct particle number
line = replaceNums(line, [0], [5])
bremf.write(line)
line = replaceNums(line, [0], [2])
decayf.write(line)
elif event_line < 7: # If first 5 write to bremfile
bremf.write(line)
if event_line == 6: # Take note of Ap info for projection
px,py,pz,en = [
float(v) for v in numsInLine(line)[6:10]
]
Ap_3mom = np.array((px,py,pz))
elif event_line < 9: # decay electrons
# Null parents
line = replaceNums(line, [2,3], [-1,-1])
decayf.write(line)
# Skip mgrwt. add appropriate vertex, and end event
elif event_line == 16 :
                # Add vertices
#x,y,z,t = next(Xs), next(Ys), next(Zs), next(decay_t)*(en/mAp)
x,y,z,t = next(Xs), next(Ys), 0, next(decay_t)*(en/mAp)
bremf.write( '#vertex {} {} {}\n'.format(x,y,z) )
c_vertex = np.array( (x,y,z) )
d_vertex = c_vertex + Ap_3mom*c_speed / mAp * t
decayf.write( '#vertex {} {} {} {}\n'.format(*d_vertex,t) )
# End event
bremf.write(line)
decayf.write(line)
# End both
elif line == '</LesHouchesEvents>\n':
bremf.write(line)
decayf.write(line)
return bremfile, decayfile
# <From <NAME>> ##################################################
# Hadronic R ratio used to compute hadronic width of the A'
Rvals=np.loadtxt("reformat/r_fixed.dat")
#Rvals=np.loadtxt("r_fixed.dat")
Rvals_interp = interpolate.interp1d(Rvals[:,0],Rvals[:,1],kind='linear');
def Rfunc(s):
if np.sqrt(s) >= 0.36:
return Rvals_interp(np.sqrt(s))
else:
return 0.
def gamma_ap_to_ll(mAp,ml,eps):
if mAp < 2.*ml:
return 0.
aEM=1/137.035999679
beta=1. - (2*ml/mAp)**2
return (1./3.)*(aEM*eps**2)*mAp*np.sqrt(beta)*(1 + (2*ml**2)/mAp**2)
# Total decay rate of Ap into electrons and muons and hadrons
# Valid for mAp > 2*me
# Width is returned in GeV
def gamma_ap_tot(mAp, epsilon):
me = 0.51099895/1000.
mmu = 105.6583745/1000.
return gamma_ap_to_ll(mAp,me,epsilon) \
+ gamma_ap_to_ll(mAp,mmu,epsilon)*(1. + Rfunc(mAp**2))
# </From <NAME>> ##################################################
##################################################
# Line editing
##################################################
def numsInLine(line_w_nums):
""" Find numbers in line """
nums = re.findall(r' [\d,\.,e,\-,\+]+', line_w_nums) # Seems close enough
return [ num[1:] for num in nums ]
def rescaleLine(line_w_nums, scale=decimal.Decimal('0.1'), tokens=[0]):
""" Replace numbers at given tokens (indicies) with scaled versions """
numbers = numsInLine(line_w_nums)
numbers = [ numbers[i] for i in tokens ] # Get numbers at desired indicies
scaled_line = line_w_nums
for number in numbers:
scaled_line = re.sub(re.sub(r'\+', r'\\+', number), # looks like - equiv not needed
str(decimal.Decimal(number)*scale), scaled_line,
count=1)
return scaled_line
def replaceNums(line_w_nums, tokens, new_vals):
""" Replace numbers at given tokens (indicies) with specific new_vals """
numbers = numsInLine(line_w_nums)
numbers = [ numbers[i] for i in tokens ]# Numbers we care about
new_line = line_w_nums
for number, new_val in zip(numbers,new_vals):
new_line = re.sub(number, str(new_val), new_line, count=1)
return new_line
if __name__=='__main__': main()
```
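The mAp/eps &rarr; width &rarr; lifetime &rarr; boosted decay length chain used in both functions above can be checked in isolation. A sketch calling `gamma_ap_tot` from this file (it needs reformat/r_fixed.dat on disk; the mass, coupling, and energy are example values):
```python
mAp, eps, energy = 0.1, 1e-5, 4.0       # GeV, coupling, A' lab energy (examples)
width = gamma_ap_tot(mAp, eps)          # total width in GeV
tau = 6.582e-25 / width                 # hbar = 6.582e-25 GeV*s
c_speed = 299_792_458_000               # mm/s, as above
gctau = c_speed * tau * (energy / mAp)  # boosted decay length in mm
print('width = {:.3e} GeV, tau = {:.3e} s, gamma*c*tau = {:.3e} mm'.format(
    width, tau, gctau))
```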
#### File: vissig/dev/vis_reach.py
```python
import numpy as np
import sys
import array
tmpargv = sys.argv
sys.argv = []
import getopt
import ROOT
from ROOT import gROOT, TFile, TTree, TChain, gDirectory, TLine, gStyle, TCanvas, TLegend, TH1, TH1F, TH2F, TF1, TLatex
sys.argv = tmpargv
def openPDF(outfile,canvas):
canvas.Print(outfile+".pdf[")
def closePDF(outfile,canvas):
canvas.Print(outfile+".pdf]")
def N_ap(m, eps, eot):
return 7. * pow(eps/1.e-5, 2) * pow(0.1/m, 2) * eot / 1.e16
def N_sig(Naprime, zmin, zmax, gctau):
return Naprime * (np.exp(-zmin / gctau) - np.exp(-zmax / gctau))
def GammaCTau(E, m, eps):
return 65. * (E/8.) * pow(1.e-5 / eps, 2) * pow(0.1/m, 2)
label = ""
options, remainder = getopt.gnu_getopt(sys.argv[1:], 'hl:')
# Parse the command line arguments
for opt, arg in options:
if opt=='-l':
label = arg
if opt=='-h':
        print('Usage: vis_reach.py [-l label] <outfile>')  # print_usage() was never defined
sys.exit(0)
gStyle.SetOptStat(0)
c = TCanvas("c","c",800,600)
outfile = remainder[0]
ebeam = 8. #GeV
zmin = 43. #cm
zmax = 315. #cm
#zmin = 43 #cm
#zmax = 84 #cm
eot = 1.e16
minSignal = 14
NepsBins = 100
epsmin = -7
epsmax = -2
nMass = 100
#massmin = 10
#massmax = 1000
massmin = -2
massmax = 0
Medges = array.array('d')
Epsedges = array.array('d')
for i in range(0,nMass+1):
#Medges.append(massmin/1.e3+(i-0.5)*(massmax/1.e3-massmin/1.e3)/float(nMass-1))
Medges.append(10**(massmin+(i-0.5)*(massmax-massmin)/float(nMass-1)))
for j in range(0,NepsBins+1):
Epsedges.append(10**(epsmin+(j-0.5)*(epsmax-epsmin)/float(NepsBins-1)))
NAprime = TH2F("NAprime", "NAprime", nMass, Medges, NepsBins, Epsedges)
GamCTau = TH2F("GamCTau", "GamCTau", nMass, Medges, NepsBins, Epsedges)
detectable = TH2F("detectable", "detectable", nMass, Medges, NepsBins, Epsedges)
for i in range(0, nMass):
#mass = (massmax - massmin)/float(nMass - 1) * i + massmin
logmass = (massmax - massmin)/float(nMass - 1) * i + massmin
mass = 10**logmass
#massArr.append(mass)
for j in range(0, NepsBins):
logeps = (epsmax - epsmin)/float(NepsBins - 1) * j + epsmin
eps = 10**logeps
#epsarr.append(eps)
Naprime = N_ap(mass, eps, eot)
gctau = GammaCTau(ebeam, mass, eps)
nsig = N_sig(Naprime, zmin, zmax, gctau)
#print(nsig)
NAprime.Fill(mass, eps, Naprime)
GamCTau.Fill(mass, eps, gctau)
detectable.Fill(mass, eps, nsig)
openPDF(outfile,c)
c.SetLogx(1)
c.SetLogy(1)
c.SetLogz(1)
NAprime.Draw("COLZ")
NAprime.SetTitle("Number of A's Produced, {0:.0f} GeV Beam, {1} EOT {2}".format(ebeam, eot, label))
NAprime.GetXaxis().SetTitle("mass [GeV] ")
NAprime.GetYaxis().SetTitle("#epsilon")
c.Print(outfile+".pdf")
GamCTau.Draw("COLZ")
GamCTau.SetTitle("Gamma CTau, {0:.0f} GeV Beam {1}".format(ebeam, label))
GamCTau.GetXaxis().SetTitle("mass [GeV] ")
GamCTau.GetYaxis().SetTitle("#epsilon")
c.Print(outfile+".pdf")
detectable.Draw("COLZ")
detectable.SetTitle("Number of Signal Events, {0:.0f} GeV Beam, {1} EOT {2}".format(ebeam, eot, label))
detectable.GetXaxis().SetTitle("mass [GeV] ")
detectable.GetYaxis().SetTitle("#epsilon")
c.Print(outfile+".pdf")
nlevels = 1
contour = array.array('d')
contour.append(minSignal)
detectable.SetContour(1, contour)
detectable.Draw("cont2")
detectable.SetTitle("Contour for {0} Signal Events, {1:.0f} GeV Beam, {2} EOT {3}".format(minSignal, ebeam, eot, label))
detectable.GetXaxis().SetTitle("mass [GeV] ")
detectable.GetYaxis().SetTitle("#epsilon")
c.Print(outfile+".pdf")
closePDF(outfile,c)
```
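The scan above is three closed-form formulas composed per (mass, eps) point; a plain-numpy restatement for checking a single point, using the same constants as the script:
```python
import numpy as np

def n_ap(m, eps, eot):
    return 7. * (eps / 1.e-5) ** 2 * (0.1 / m) ** 2 * eot / 1.e16

def gamma_ctau(E, m, eps):
    return 65. * (E / 8.) * (1.e-5 / eps) ** 2 * (0.1 / m) ** 2

def n_sig(n_aprime, zmin, zmax, gctau):
    return n_aprime * (np.exp(-zmin / gctau) - np.exp(-zmax / gctau))

m, eps, eot, ebeam = 0.1, 1.e-5, 1.e16, 8.
print(n_sig(n_ap(m, eps, eot), 43., 315., gamma_ctau(ebeam, m, eps)))
```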
#### File: jmlazaro25/vissig/split.py
```python
import os
import ROOT as r
from glob import glob
r.gSystem.Load('libFramework.so')
def load(fil,treeName='LDMX_Events'):
# Load ROOT tree
twee = r.TChain(treeName)
twee.Add(fil)
return twee
def main():
# Not triggered (we'll want a different one than for stan)
gs = ('pn','0.001','0.01','0.1','1.0')
bkg_train_min = 1_250_000
sig_train_min = 312_500
indir = '/nfs/slac/g/ldmx/users/jmlazaro/samples/v3/4gev/vissig/train'
outdir = '/nfs/slac/g/ldmx/users/jmlazaro/samples/v3/4gev/vissig/test'
for g in gs:
files = glob( indir + '/{}/*.root'.format(g))
n_evs = [ load(f).GetEntries() for f in files ]
        if g == gs[0]: train_min = bkg_train_min
        else: train_min = sig_train_min
# Report some stuff
print('\n\n\n{}: '.format(g))
print('n_evs: {}'.format(n_evs))
print('min:',train_min)
for i in range( len( files ) ):
if sum( n_evs[:i+1] ) >= train_min:
ci = i
break
# Get number need of events still needed for training
cut = train_min - sum( n_evs[:ci] )
# Break up last file if needed
if cut > 0:
# Load original
ogTree = load( files[ci] )
# Put some in training
trainF = r.TFile( files[ci][:-5] + '_p1.root', 'RECREATE')
trainT = ogTree.CloneTree(0)
for entry in range(cut):
ogTree.GetEntry(entry)
trainT.Fill()
trainF.cd()
trainT.Write()
trainF.Close()
# Put some in testing
testF = r.TFile(
'{}/{}/'.format(outdir, g) \
+ files[ci].split('/')[-1][:-5] + '_p2.root',
'RECREATE'
)
testT = ogTree.CloneTree(0)
for entry in range( cut, ogTree.GetEntries() ):
ogTree.GetEntry(entry)
testT.Fill()
testF.cd()
testT.Write()
testF.Close()
# Move original into 'cut' directory to avoid confusion
cutdir = indir + '/../cut'
if not os.path.exists(cutdir): os.makedirs(cutdir)
print('mv {} {}'.format(files[ci], cutdir))
os.system( 'mv {} {}'.format(files[ci], cutdir) )
# Move all others into testing
if not os.path.exists( f'{outdir}/{g}' ): os.makedirs( f'{outdir}/{g}' )
for f in files[ci+1:]:
print('mv {} {}/{}'.format(f, outdir, g))
os.system( 'mv {} {}/{}'.format(f, outdir, g) )
if __name__ == '__main__': main()
``` |
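The "first file at which the running total reaches train_min" step in `main()` is the part most prone to off-by-one mistakes; a tiny standalone sketch of the same arithmetic with toy counts:
```python
# Toy per-file event counts standing in for n_evs
n_evs = [400_000, 500_000, 450_000, 300_000]
train_min = 1_250_000

ci = next(i for i in range(len(n_evs)) if sum(n_evs[:i + 1]) >= train_min)
cut = train_min - sum(n_evs[:ci])  # events still needed from file ci
print(ci, cut)                     # -> 2 350000
```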
{
"source": "jmlazaro25/wab",
"score": 2
} |
#### File: jmlazaro25/wab/bdtvhcal.py
```python
import ROOT as r
import numpy as np
from array import array
import mods.ROOTmanager as manager
#from loguru import logger
r.gSystem.Load( '/home/jmlazaro/research/ldmx-sw-v2.3.0/install/lib/libEvent.so' )
<EMAIL>
def main():
# Inputs and their trees and stuff
pdict = manager.parse()
inlist = pdict[ 'inlist' ]
outlist = pdict[ 'outlist' ]
group_labels = pdict[ 'groupls' ]
maxEvents = pdict[ 'maxEvents' ]
x_title = 'Maximum photoelectrons in an HCal Hit'
y_title = 'Gabrielle discriminator value'
# present as condition in https://github.com/LDMX-Software/Hcal/blob/9f968153cfe5683c94682c49f09451cce3b8cd25/src/Hcal/HcalVetoProcessor.cxx#L60-L72
print( 'input group: {}'.format( inlist[0] ) ) # remove
tree = manager.load(inlist[0], treeName='EcalVeto')
# Construct tree processes
procs = []
for gl, group in zip( group_labels, inlist ):
procs.append(
manager.TreeProcess(
event_process, group, ID=gl, tree = tree
)
)
    # Histograms, branches, and stuff
for proc in procs:
# Misc
print( 'Running %s' % ( proc.ID ) )
proc.events_used = 0 # Left as a reminder that we might want to exclude some events
proc.hists = {}
#print(proc.tree.Print()) # remove
# Histos
bdtVhcal = manager.Histogram(r.TH2D(proc.ID,\
' ;' + x_title + ';' + y_title,
20,0,100 , 20,0.95,1 ))
#100,0,100 , 20,0.95,1 ))
#100,0,100 , 100,0,100 ))
proc.hists[ 'bdtVhcal' ] = bdtVhcal
# RUN
proc.run( maxEvents=maxEvents )
# Gold
plot( procs, outlist[ 0 ] + '/' + group_labels [ 0 ], x_title, y_title )
manager.rmScratch()
print( '\nDone!\n' )
<EMAIL>
def event_process( self ):
# Collect data
self.hists[ 'bdtVhcal' ].hist.Fill(
self.tree.maxPE, self.tree.discValue_gabrielle
#self.tree.maxPE, self.tree.maxPE
)
def plot( processes, output, xTitle='x', yTitle='y' ):
c = r.TCanvas( "c", "", 900, 900 )
c.SetTitle( ' ;' + xTitle + ';' + yTitle )
c.SetLeftMargin( 0.15 )
c.SetRightMargin( 0.15 )
# Stylize and draw
for proc in processes:
for key in proc.hists:
r.gStyle.SetOptStat( 0 )
c.SetLogz()
proc.hists[ key ].hist.SetMinimum( 1 )
proc.hists[ key ].hist.Draw( "colz" )
#print('\nUsed %s events\n' % (proc.events_used) )
# Overall style
#r.gStyle.SetOptTitle(0)
# Save as pdf and png
c.SaveAs( output + '.pdf' )
if __name__ == "__main__":
main()
```
#### File: wab/mods/ROOTmanager.py
```python
import os
import sys
import ROOT as r
import numpy as np # ?
#TODO: Make options for no output or based on input
#TODO: Make nolists independent for in and out
###################################
# Constants
###################################
# ROOT colors
colors = {
'kBlue': 600,
'kGreen': 417,
'kMagenta': 616,
'kOrange': 807, # +7
'kBlack': 1,
'kYellow': 400,
'kViolet': 880,
'kRed': 632,
'kCyan': 432
}
# For easier loops
color_list = [colors[key] for key in colors]
# ROOT 1D/2D line styles
lineStyles = {
'kSolid': 1,
'kDashed': 2,
'kDotted': 3,
'kDashDotted': 4
}
# For easier loops
lineStyle_list = [i for i in range(1,11)]
###################################
# Classes
###################################
class TreeProcess:
# For analysing .root samples
def __init__(self, event_process, group=[], tree=None, tree_name = None, ID = '',\
color=1, strEvent=0, maxEvents=-1, pfreq=1000, batch=False, extrafs=None):
print('\nPreparing {}'.format(ID))
self.event_process = event_process
self.group_files = group
self.tree = tree
self.tree_name = tree_name
self.ID = ID
self.color = color
self.strEvent = strEvent
self.maxEvents = maxEvents
self.pfreq = pfreq
self.batch = batch
self.extrafs = extrafs
self.cwd = os.getcwd()
        # Build tree and move operations to a scratch directory
# if providing group_files instead of a tree
self.mvd = False
if self.tree == None:
self.mvd = True
# Create the scratch directory if it doesn't already exist
scratch_dir = self.cwd + '/scratch'
print( 'Using scratch path %s' % scratch_dir )
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
# Get tmp num
num=0; check = True
while check:
if os.path.exists( scratch_dir+'/tmp_'+str(num) ):
num += 1
else:
check = False
# Create and mv into tmp directory that can be used to copy files into
if self.batch:
self.tmp_dir='%s/%s' % (scratch_dir, os.environ['LSB_JOBID'])
else:
self.tmp_dir = '%s/%s' % (scratch_dir, 'tmp_'+str(num))
if not os.path.exists(self.tmp_dir):
print( 'Creating tmp directory %s' % self.tmp_dir )
os.makedirs(self.tmp_dir)
os.chdir(self.tmp_dir)
# Copy input files to the tmp directory
print( 'Copying input files into tmp directory' )
for rfilename in self.group_files:
os.system("cp %s ." % rfilename )
os.system("ls .")
# Just get the file names without the full path
tmpfiles = [f.split('/')[-1] for f in self.group_files]
# Load'em
if self.tree_name != None:
self.tree = load(tmpfiles, self.tree_name)
else:
self.tree = load(tmpfiles)
# Move back to cwd in case running multiple procs
os.chdir(self.cwd)
def addBranch(self, ldmx_class, branch_name):
# Add a new branch to read from
if self.tree == None:
sys.exit('Set tree')
if ldmx_class == 'EventHeader': branch = r.ldmx.EventHeader()
elif ldmx_class == 'EcalVetoResult': branch = r.ldmx.EcalVetoResult()
elif ldmx_class == 'HcalVetoResult': branch = r.ldmx.HcalVetoResult()
elif ldmx_class == 'TriggerResult': branch = r.ldmx.TriggerResult()
elif ldmx_class == 'SimParticle': branch = r.map(int, 'ldmx::'+ldmx_class)()
else: branch = r.std.vector('ldmx::'+ldmx_class)()
self.tree.SetBranchAddress(branch_name,r.AddressOf(branch))
return branch
def run(self, strEvent=0, maxEvents=-1, pfreq=1000):
# Process events
if strEvent != 0: self.strEvent = strEvent
if maxEvents != -1: self.maxEvents = maxEvents
if self.maxEvents == -1 or self.strEvent + self.maxEvents > self.tree.GetEntries():
self.maxEvents = self.tree.GetEntries() - self.strEvent
maxEvent = self.strEvent + self.maxEvents
if pfreq != 1000: self.pfreq = pfreq
self.event_count = self.strEvent
while self.event_count < maxEvent:
self.tree.GetEntry(self.event_count)
if self.event_count%self.pfreq == 0:
print('Processing Event: %s'%(self.event_count))
self.event_process(self)
self.event_count += 1
        # Execute any closing function(s) (might implement *args, **kwargs later)
if self.extrafs != None:
for extraf in self.extrafs:
extraf()
# Move back to cwd in case running multiple procs
os.chdir(self.cwd)
# Remove tmp directory if created in move
if self.mvd:
print( 'Removing tmp directory %s' % self.tmp_dir )
os.system('rm -rf %s' % self.tmp_dir)
class TreeMaker:
# To write a tree in an analysis process
def __init__(self, outfile, tree_name, branches_info = {}, outdir=''):
self.outfile = outfile
self.tree_name = tree_name
self.branches_info = branches_info
self.branches = {}
self.outdir = outdir
# Create output file and tree
self.tfout = r.TFile(self.outfile,"RECREATE")
self.tree = r.TTree(tree_name, tree_name)
# Set up new tree branches if given branches_info
if len(branches_info) != 0:
for branch_name in branches_info:
self.addBranch(self.branches_info[branch_name]['rtype'],\
self.branches_info[branch_name]['default'],\
branch_name)
def addBranch(self, rtype, default_value, branch_name):
# Add a new branch to write to
self.branches_info[branch_name] = {'rtype': rtype, 'default': default_value}
self.branches[branch_name] = np.zeros(1, dtype=rtype)
        if rtype is float:
            self.tree.Branch(branch_name, self.branches[branch_name], branch_name + "/D")
        elif rtype is int:
            self.tree.Branch(branch_name, self.branches[branch_name], branch_name + "/I")
# ^ probably use cases based on rtype to change the /D if needed?
def resetFeats(self):
# Reset variables to defaults for new event
# Return because feats['feat'] looks nicer than self.tfMaker.feats['feat']
feats = {}
for branch_name in self.branches_info:
feats[branch_name] = self.branches_info[branch_name]['default']
return feats
def fillEvent(self, feats):
# Fill the tree with new feature values
for feat in feats:
self.branches[feat][0] = feats[feat]
self.tree.Fill()
def wq(self):
# Save the tree and close the file
self.tfout.Write(self.tree_name)
self.tfout.Close()
if self.outdir != '':
if not os.path.exists(self.outdir):
print( 'Creating %s' % (self.outdir) )
os.makedirs(self.outdir)
print( 'cp %s %s' % (self.outfile,self.outdir) )
os.system('cp %s %s' % (self.outfile,self.outdir))
class Histogram:
# Just to hold histogram-related stuff and make other py code nicer
def __init__(self, hist, title='', xlabel='x', ylabel='y',\
color=1, lineStyle=1, fillStyle=1):
self.hist = hist
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
self.color = color
self.lineStyle = lineStyle
self.fillStyle = fillStyle
###################################
# Functions
###################################
def parse(nolist = False):
import glob
import argparse
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--batch', action='store_true', dest='batch', default=False,
help='Run in batch mode [Default: False]')
parser.add_argument('--sep', action='store_true', dest='separate', default = False,
help='separate events into different files [Default: False]')
parser.add_argument('-i', nargs='+', action='store', dest='infiles', default=[],
help='input file(s)')
parser.add_argument('--indirs', nargs='+', action='store', dest='indirs', default=[],
help='Director(y/ies) of input files')
parser.add_argument('-g','-groupls', nargs='+', action='store', dest='group_labels',
default='', help='Human readable sample labels e.g. for legends')
parser.add_argument('-o','--out', nargs='+', action='store', dest='out', default=[],
help='output files or director(y/ies) of output files')
# if inputting directories, it's best to make a system
# for naming files in main() of main script
parser.add_argument('--notlist', action='store_true', dest='nolist',
help="return things without lists (to make things neater for 1 sample runs")
parser.add_argument('-s','--start', type=int, action='store', dest='startEvent',
default=0, help='event to start at')
parser.add_argument('-m','--max', type=int, action='store', dest='maxEvents',
default=-1, help='max events to run over for EACH group')
args = parser.parse_args()
# Input
if args.infiles != []:
inlist = [[f] for f in args.infiles] # Makes general loading easier
if nolist or args.nolist == True:
inlist = inlist[0]
elif args.indirs != []:
inlist = [glob.glob(indir + '/*.root') for indir in args.indirs]
if nolist or args.nolist == True:
inlist = inlist[0]
else:
sys.exit('provide input')
# Output
if args.out != []:
outlist = args.out
if nolist or args.nolist == True:
outlist = outlist[0]
else:
sys.exit('provide output')
pdict = {
'batch': args.batch,
'separate': args.separate,
'inlist': inlist,
'groupls': args.group_labels,
'outlist': outlist,
'startEvent': args.startEvent,
'maxEvents': args.maxEvents
}
return pdict
# Load a tree from a group of input files
def load(group,treeName='LDMX_Events'):
# Load a group of files into a readable tree
tree = r.TChain(treeName)
for f in group:
tree.Add(f)
return tree
# Remove scratch dir
def rmScratch():
if os.path.exists('./scratch'):
print( '\nRemoving scratch directory' )
os.system('rm -rf ./scratch')
```
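A minimal end-to-end sketch of the two classes above: one output branch, one input collection. It assumes libFramework is already loaded, an LDMX_Events file exists at the placeholder path, and the EcalRecHits_v12 collection is present:
```python
import mods.ROOTmanager as manager

# Output tree with a single float branch
tfmaker = manager.TreeMaker('out.root', 'EcalVeto',
                            {'nHits': {'rtype': float, 'default': 0.}})

def event_process(self):
    feats = self.tfMaker.resetFeats()
    feats['nHits'] = float(len(self.ecalRecHits))
    self.tfMaker.fillEvent(feats)

proc = manager.TreeProcess(event_process, group=['input.root'],  # placeholder path
                           tree_name='LDMX_Events', ID='demo',
                           extrafs=[tfmaker.wq])  # wq() saves/closes at the end
proc.tfMaker = tfmaker
proc.ecalRecHits = proc.addBranch('EcalHit', 'EcalRecHits_v12')
proc.run(pfreq=1000)
```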
#### File: wab/plotting/plotTools.py
```python
import ROOT as rt
from array import array
def computeEffVsCutGraph( hist, reversecutdir=False ):
# hist has to be TH1 derivative
nbins = hist.GetNbinsX()
xbins = array( 'd', [ 0.0 ] * nbins )
for ibin in range( nbins ):
xbins[ ibin ] = hist.GetBinLowEdge( ibin + 1 )
npasssig = array( 'd', [ 0.0 ] * nbins )
effsig = array( 'd', [ 0.0 ] * nbins )
sigtotal = hist.Integral( 0, nbins + 1 )
for ibin in range( nbins ):
if reversecutdir:
npasssig[ ibin ] = hist.Integral( 0, ibin )
else:
npasssig[ ibin ] = hist.Integral( ibin, nbins + 1 )
effsig[ ibin ] = npasssig[ ibin ] / sigtotal
gr = rt.TGraph( nbins, xbins, effsig )
gr.SetTitle( '' )
gr.GetXaxis().SetTitle( hist.GetXaxis().GetTitle() )
gr.GetYaxis().SetTitle( 'Efficiency' )
return gr
def getCutValueForEfficiency( hist, targeteff, reversecutdir=False ):
nbins = hist.GetNbinsX()
xbins = array( 'd', [ 0.0 ] * ( nbins + 1 ) )
binw = array( 'd', [ 0.0 ] * nbins )
for ibin in range( nbins ):
xbins[ ibin ] = hist.GetBinLowEdge( ibin + 1 )
binw[ ibin ] = hist.GetBinWidth( ibin + 1 )
xbins[ nbins ] = xbins[ nbins - 1 ] + binw[ nbins - 1 ]
npass = array( 'd', [ 0.0 ] * nbins )
eff = array( 'd', [ 0.0 ] * nbins )
total = hist.Integral( 0, nbins + 1 )
effdiff = 1.0
nbin = -1
for ibin in range( nbins ):
if reversecutdir:
npass[ ibin ] = hist.Integral( 0, ibin )
else:
npass[ ibin ] = hist.Integral( ibin, nbins + 1 )
eff[ ibin ] = npass[ ibin ] / total
tmpdiff = abs( eff[ ibin ] - targeteff )
if tmpdiff < effdiff:
effdiff = tmpdiff
nbin = ibin
return ( xbins[ nbin ], eff[ nbin ] )
def getEfficiencyForCutValue( hist, cut, reversecutdir=False ):
nbins = hist.GetNbinsX()
xbins = array( 'd', [ 0.0 ] * ( nbins + 1 ) )
binw = array( 'd', [ 0.0 ] * nbins )
for ibin in range( nbins ):
xbins[ ibin ] = hist.GetBinLowEdge( ibin + 1 )
binw[ ibin ] = hist.GetBinWidth( ibin + 1 )
xbins[ nbins ] = xbins[ nbins - 1 ] + binw[ nbins - 1 ]
npass = array( 'd', [ 0.0 ] * nbins )
eff = array( 'd', [ 0.0 ] * nbins )
total = hist.Integral( 0, nbins + 1 )
diff = 1.0
nbin = -1
for ibin in range( nbins ):
if reversecutdir:
npass[ ibin ] = hist.Integral( 0, ibin )
else:
npass[ ibin ] = hist.Integral( ibin, nbins + 1 )
eff[ ibin ] = npass[ ibin ] / total
tmpdiff = abs( xbins[ ibin ] - cut )
if tmpdiff < diff:
diff = tmpdiff
nbin = ibin
return ( eff[ nbin ], xbins[ nbin ] )
```
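A quick standalone check of the helpers above with a Gaussian-filled histogram; for a distribution centered at zero, the cut value returned for 50% efficiency should land near zero:
```python
import ROOT as rt

h = rt.TH1D('h_score', ';score;entries', 50, -5., 5.)
h.FillRandom('gaus', 10000)  # ROOT's built-in standard gaussian

gr = computeEffVsCutGraph(h)                 # efficiency of keeping x >= cut
cut, eff = getCutValueForEfficiency(h, 0.5)  # threshold closest to 50% efficiency
print(cut, eff)
```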
#### File: wab/plotting/plotVariables.py
```python
import os
import sys
import ast
from loguru import logger
import plotTools as pt
import styleTools as st
from collections import OrderedDict
from configparser import ConfigParser
from ROOT import gROOT, gStyle, gSystem, TFile, TTree, TH1, TH1D, TH2D, TGraph, TCanvas, TLegend, TLine
@logger.catch
def main():
# Default conf file, if running with a different one just provide it as an argument
configFile = 'plots.conf'
args = sys.argv[ 1: ]
if len( args ) >= 1:
configFile = args[ 0 ]
if os.path.exists( configFile ):
print( 'running with config file', configFile )
else:
print(
'you are trying to use a config file (' + configFile +
') that does not exist!'
)
sys.exit( 1 )
# Parse the conf file
cfg = ConfigParser( dict_type=OrderedDict )
cfg.optionxform = str
cfg.read( configFile )
# Set up plotting
inputdir = cfg.get( 'setup', 'inputdir' )
outputdir = cfg.get( 'setup', 'outputdir' )
procs = cfg.get( 'setup', 'processes' ).replace( ' ', '' ).split( ',' )
treename = cfg.get( 'setup', 'treename' )
treename2d = cfg.get( 'setup', 'treename2d' )
comparetype = cfg.get( 'setup', 'comparetype' )
if not comparetype in [ 'processes', 'sels' ]:
print( 'comparetype must be either "processes" or "sels"!' )
sys.exit( 1 )
plotnames = cfg.get( 'plotting', 'plotnames' ).replace( ' ',
'' ).split( ',' )
plotnames2d = cfg.get( 'plotting',
'plotnames2d' ).replace( ' ', '' ).split( ',' )
effplotnames = cfg.get( 'plotting',
'effplotnames' ).replace( ' ', '' ).split( ',' )
reversecutvars = cfg.get( 'plotting',
'reversecutvars' ).replace( ' ',
'' ).split( ',' )
logvars = cfg.get( 'plotting', 'logvars' ).replace( ' ', '' ).split( ',' )
expr = { k: v for k, v in cfg[ 'expressions' ].items() }
sel = { k: v for k, v in cfg[ 'sels' ].items() }
sel2d = { k: v for k, v in cfg[ 'sels2d' ].items() }
proclabels = { k: v for k, v in cfg[ 'proclabels' ].items() }
plotlabels = { k: v for k, v in cfg[ 'plotlabels' ].items() }
binning = { k: ast.literal_eval( v ) for k, v in cfg[ 'binning' ].items() }
colors = { k: st.colors[ v ] for k, v in cfg[ 'colors' ].items() }
# ROOT setup stuff
gROOT.SetBatch( True )
st.SetTDRStyle()
st.SetupColors()
gStyle.SetLabelSize( 0.03, "XYZ" )
gSystem.mkdir( outputdir, True )
print( 'Making plots!' )
files = {
proc: TFile.Open( inputdir + '/' + proc + '_eval.root' )
for proc in procs
}
# Loop over 1D variables to be plotted
for n in plotnames:
if n == '' or not n in binning:
continue
print( 'plotting', n )
( outerloopitems, innerloopitems
) = ( procs, sel ) if comparetype == 'sels' else ( sel, procs )
# Loop over outer loop items
for x in outerloopitems:
hists = []
ymax = -1.
infile = None
tree = None
selexp = ''
if comparetype == 'processes':
selexp = sel[ x ]
print( 'with selection', selexp )
# Loop over inner loop items
for y in innerloopitems:
if comparetype == 'sels':
infile = files[ x ]
selexp = sel[ y ]
print( 'with selection', selexp )
else:
infile = files[ y ]
tree = infile.FindObjectAny( treename )
hist = TH1D(
'_'.join( [ 'h', n, x, y ] ), '', binning[ n ][ 0 ],
binning[ n ][ 1 ], binning[ n ][ 2 ]
)
hist.SetLineColor( colors[ y ] )
# Check if variable name corresponds to an expression
if n in expr:
tree.Draw(
expr[ n ] + '_'.join( [ '>>h', n, x, y ] ), selexp,
'histnorm'
)
else:
tree.Draw(
n + '_'.join( [ '>>h', n, x, y ] ), selexp, 'histnorm'
)
# Histogram setup
st.addOverFlow( hist )
hist.GetXaxis().SetTitle( plotlabels[ n ] )
hist.GetXaxis().SetTitleSize( 0.04 )
hist.GetYaxis().SetTitle( "A.U." )
hist.GetYaxis().SetTitleSize( 0.05 )
hist.SetLineWidth( 3 )
hist.SetLineColor( colors[ y ] )
if ( hist.GetMaximum() > ymax ):
ymax = hist.GetMaximum()
tempmax = 0.0
maxbin = 0
for ibin in range( 1, hist.GetNbinsX() + 1 ):
binc = hist.GetBinContent( ibin )
if binc > tempmax:
tempmax = binc
maxbin = hist.GetBinLowEdge( ibin )
# Add this histogram to the list
hists.append( hist )
# Setup canvas
c = st.MakeCanvas( "c", "", 600, 600 )
leg = TLegend( 0.5, 0.65, 0.95, 0.85 )
#leg = TLegend( 0.3, 0.6, 0.5, 0.9 )
st.SetLegendStyle( leg )
leg.SetTextSize( 0.04 )
# Check if plotting in log scale
logy = ( n in logvars )
if ( logy ):
c.SetLogy()
# Draw histograms and add each entry to the legend
for hist in hists:
if ( logy ):
hist.GetYaxis().SetRangeUser( 1e-04, 10 * ymax )
else:
hist.GetYaxis().SetRangeUser( 0, 1.2 * ymax )
hist.Draw( 'histsame' )
hname = str( hist.GetName() )
labelname = hname.split( list( sel )[ 0 ] + '_' )[ -1 ]
leg.AddEntry( hist, proclabels[ labelname ], 'L' )
# Draw legend and save the plot
leg.Draw( 'same' )
st.LDMX_lumi( c, 0, 'Simulation' )
c.SaveAs(
outputdir + '/' + n + '_' + x +
( '_log.pdf' if logy else '.pdf' )
)
# Loop over 2D plots
for n in plotnames2d:
if n == '':
continue
xvar = n[ n.rindex( '_' ) + 1: ]
yvar = n[ 0:n.index( '_' ) ]
if not xvar in binning or not yvar in binning:
continue
print( 'plotting', yvar, 'vs', xvar )
# Loop over processes
for proc in procs:
infile = files[ proc ]
tree = infile.FindObjectAny( treename2d )
c = st.MakeCanvas( 'c', '', 600, 600 )
# Loop over cut strings
for seln in sel2d:
selexp = sel2d[ seln ]
print( 'with selection', selexp )
hist = TH2D(
'_'.join( [ 'h', n, proc, seln ] ), '',
binning[ xvar ][ 0 ], binning[ xvar ][ 1 ],
binning[ xvar ][ 2 ], binning[ yvar ][ 0 ],
binning[ yvar ][ 1 ], binning[ yvar ][ 2 ]
)
c.cd()
logx, logy = False, False
if xvar in logvars:
logx = True
if yvar in logvars:
logy = True
if logx:
c.SetLogx()
if logy:
c.SetLogy()
c.SetLogz()
c.SetLeftMargin( 0.13 )
c.SetRightMargin( 0.18 )
print( 'Drawing', expr[ n ] )
if n in expr:
#tree.Draw(expr[n]+'_'.join(['>>h',n,proc,seln]),selexp,'COLZnorm')
tree.Draw(
expr[ n ] + '_'.join( [ '>>h', n, proc, seln ] ),
selexp, 'COLZ'
)
else:
#tree.Draw(n+'_'.join(['>>h',n,proc,seln]),selexp,'COLZnorm')
tree.Draw(
n + '_'.join( [ '>>h', n, proc, seln ] ), selexp,
'COLZ'
)
# Histogram setup
hist.GetXaxis().SetTitle( plotlabels[ xvar ] )
hist.GetXaxis().SetTitleSize( 0.05 )
hist.GetYaxis().SetTitle( plotlabels[ yvar ] )
hist.GetYaxis().SetTitleSize( 0.05 )
# Save plot
c.SaveAs(
outputdir + '/' + n + '_' + proc + '_' + seln +
( '_log.pdf' if logx or logy else '.pdf' )
)
# Loop over efficiency variables to be plotted
for n in effplotnames:
if n == '' or not n in binning:
continue
print( 'plotting efficiency vs', n )
( outerloopitems, innerloopitems
) = ( procs, sel ) if comparetype == 'sels' else ( sel, procs )
# Loop over outer loop items
for x in outerloopitems:
hists = []
effs = []
infile = None
tree = None
selexp = ''
if comparetype == 'sels':
infile = files[ x ]
tree = infile.FindObjectAny( treename )
else:
selexp = sel[ x ]
print( 'with selection', selexp )
isel = -1
# Loop over inner loop items
for y in innerloopitems:
if comparetype == 'sels':
selexp = sel[ y ]
print( 'with selection', selexp )
else:
infile = files[ y ]
tree = infile.FindObjectAny( treename )
isel += 1
hist = TH1D(
'_'.join( [ 'h', n, x, y ] ), '', binning[ n ][ 0 ],
binning[ n ][ 1 ], binning[ n ][ 2 ]
)
hist.SetLineColor( colors[ y ] )
hist.SetMarkerColor( colors[ y ] )
# Check if variable name corresponds to an expression
if n in expr:
tree.Draw(
expr[ n ] + '_'.join( [ '>>h', n, x, y ] ), selexp
)
else:
tree.Draw( n + '_'.join( [ '>>h', n, x, y ] ), selexp )
# Histogram setup
st.addOverFlow( hist )
hist.GetXaxis().SetTitle( plotlabels[ n ] )
hists.append( hist )
# Setup canvas
c = st.MakeCanvas( "c", "", 600, 600 )
leg = TLegend( 0.65, 0.7, 0.95, 0.9 )
st.SetLegendStyle( leg )
leg.SetTextSize( 0.04 )
logy = ( n in logvars )
if ( logy ):
c.SetLogy()
if len( hists ):
xmin = hists[ 0 ].GetXaxis().GetXmin()
xmax = hists[ 0 ].GetXaxis().GetXmax()
c.DrawFrame( xmin, 1e-6 if logy else 0, xmax, 1.1 )
graphs = []
emptyhist = None
ihist = -1
for hist in hists:
ihist += 1
hname = str( hist.GetName() )
labelname = hname[ hname.rindex( '_' ) + 1: ]
xmin = hist.GetXaxis().GetXmin()
xmax = hist.GetXaxis().GetXmax()
effgr = pt.computeEffVsCutGraph( hist, n in reversecutvars )
effgr.SetLineWidth( 3 )
effgr.SetLineColor( colors[ labelname ] )
effgr.SetMarkerColor( colors[ labelname ] )
effgr.GetXaxis().SetTitle( plotlabels[ n ] )
effgr.GetYaxis().SetTitle( 'Efficiency' )
effgr.GetXaxis().SetTitleSize( 0.04 )
effgr.GetYaxis().SetTitleSize( 0.05 )
effgr.GetHistogram().GetXaxis().SetLimits( xmin, xmax )
effgr.GetHistogram().GetYaxis().SetRangeUser(
1e-6 if logy else 0.0, 1.1
)
c.cd()
effgr.Draw( 'Csame' )
if ihist == 0:
emptyhist = effgr.GetHistogram()
leg.AddEntry( effgr, proclabels[ labelname ], 'L' )
graphs.append( effgr )
if emptyhist:
emptyhist.Draw( 'AXIS' )
for graph in graphs:
graph.Draw( 'Csame' )
c.cd()
leg.Draw( 'same' )
st.LDMX_lumi( c, 0, 'Simulation' )
            c.SaveAs( outputdir + '/eff_vs_' + n + '_' + x + '.pdf' )
for infile in files.values():
infile.Close()
if __name__ == '__main__': main()
``` |
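The script reads nine named sections from plots.conf; a sketch that generates a minimal valid config programmatically. Every value here is a placeholder, the kRed/kBlue keys assume styleTools defines the usual ROOT color names, and the script then expects in/sig_eval.root and in/bkg_eval.root to exist:
```python
from collections import OrderedDict
from configparser import ConfigParser

cfg = ConfigParser(dict_type=OrderedDict)
cfg.optionxform = str  # keep option names case-sensitive, as the reader does
cfg['setup'] = {'inputdir': 'in', 'outputdir': 'out', 'processes': 'sig,bkg',
                'treename': 'EcalVeto', 'treename2d': 'EcalVeto',
                'comparetype': 'processes'}
cfg['plotting'] = {'plotnames': 'maxPE', 'plotnames2d': '', 'effplotnames': '',
                   'reversecutvars': '', 'logvars': ''}
cfg['expressions'] = {}
cfg['sels'] = {'base': '1'}  # TTree selection string; '1' keeps everything
cfg['sels2d'] = {}
cfg['proclabels'] = {'sig': 'Signal', 'bkg': 'Background'}
cfg['plotlabels'] = {'maxPE': 'Max PE'}
cfg['binning'] = {'maxPE': '(20, 0, 100)'}  # parsed with ast.literal_eval
cfg['colors'] = {'sig': 'kRed', 'bkg': 'kBlue'}
with open('plots.conf', 'w') as f:
    cfg.write(f)
```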
{
"source": "jmlcv/vspk-examples",
"score": 2
} |
#### File: vspk-examples/python/count_vports_with_vss.py
```python
from builtins import str
import argparse
import getpass
import logging
import json
from prettytable import PrettyTable
from vspk import v6 as vsdk
def get_args():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
description="vm_delete is a tool to delete a VM that was created with split activation.")
parser.add_argument('-d', '--debug', required=False,
help='Enable debug output', dest='debug', action='store_true')
parser.add_argument('-j', '--json', required=False,
help='Print as JSON, not as a table', dest='json_output', action='store_true')
parser.add_argument('-l', '--log-file', required=False,
help='File to log to (default = stdout)', dest='logfile', type=str)
parser.add_argument('-E', '--nuage-enterprise', required=True,
help='The enterprise with which to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)
parser.add_argument('-H', '--nuage-host', required=True,
help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)
parser.add_argument('-p', '--nuage-password', required=False,
help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)
parser.add_argument('-P', '--nuage-port', required=False,
help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)
parser.add_argument('-U', '--nuage-user', required=True,
help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)
parser.add_argument('-v', '--verbose', required=False,
help='Enable verbose output', dest='verbose', action='store_true')
args = parser.parse_args()
return args
def main():
"""
Main function to handle statistics
"""
# Handling arguments
args = get_args()
debug = args.debug
json_output = args.json_output
log_file = None
if args.logfile:
log_file = args.logfile
nuage_enterprise = args.nuage_enterprise
nuage_host = args.nuage_host
nuage_port = args.nuage_port
nuage_password = None
if args.nuage_password:
nuage_password = args.nuage_password
nuage_username = args.nuage_username
verbose = args.verbose
# Logging settings
if debug:
log_level = logging.DEBUG
elif verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(
filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)
logger = logging.getLogger(__name__)
# Getting user password for Nuage connection
if nuage_password is None:
logger.debug(
'No command line Nuage password received, requesting Nuage password from user')
nuage_password = getpass.getpass(
prompt='Enter password for Nuage host {0:s} for user {1:s}: '.format(nuage_host, nuage_username))
try:
# Connecting to Nuage
logger.info('Connecting to Nuage server %s:%s with username %s' %
(nuage_host, nuage_port, nuage_username))
nc = vsdk.NUVSDSession(username=nuage_username, password=<PASSWORD>,
enterprise=nuage_enterprise, api_url="https://{0:s}:{1:d}".format(nuage_host, nuage_port))
nc.start()
except Exception as e:
logger.error('Could not connect to Nuage host {0:s} with user {1:s} and specified password'.format(
nuage_host, nuage_username))
logger.critical('Caught exception: {0:s}'.format(str(e)))
return 1
if json_output:
logger.debug('Setting up json output')
json_object = []
else:
logger.debug('Setting up basic output table')
pt = PrettyTable(['Enterprise', 'Domain', '# vPorts'])
logger.debug('Fetching enterprises with flow collection enabled')
for ent in nc.user.enterprises.get(filter='flowCollectionEnabled == "ENABLED"'):
logger.debug('Handling enterprise: {0:s}'.format(ent.name))
for dom in ent.domains.get(filter='flowCollectionEnabled == "INHERITED" OR flowCollectionEnabled == "ENABLED"'):
logger.debug('Handling domain: {0:s}'.format(dom.name))
_, _, vport_count = dom.vports.count()
if json_output:
json_dict = {
'Enterprise': ent.name,
'Domain': dom.name,
'# vPorts': vport_count
}
json_object.append(json_dict)
else:
logger.debug('Add row: {0:s}, {1:s}, {2:d}'.format(
ent.name, dom.name, vport_count))
pt.add_row([ent.name, dom.name, vport_count])
if json_output:
print(json.dumps(json_object, sort_keys=True, indent=4))
else:
print(pt)
return 0
# Start program
if __name__ == "__main__":
main()
```
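The fetcher count() idiom above sizes a child collection without downloading the objects; a pared-down sketch of the same calls with placeholder credentials and endpoint:
```python
from vspk import v6 as vsdk

# Placeholder credentials and endpoint; mirrors the session setup in main()
session = vsdk.NUVSDSession(username='csproot', password='csproot',
                            enterprise='csp',
                            api_url='https://vsd.example.com:8443')
session.start()

for ent in session.user.enterprises.get(filter='flowCollectionEnabled == "ENABLED"'):
    for dom in ent.domains.get():
        _, _, n_vports = dom.vports.count()  # count() skips fetching the objects
        print(ent.name, dom.name, n_vports)
```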
#### File: vspk-examples/python/deploy_vsphere_template_with_nuage.py
```python
from __future__ import print_function
from builtins import str
from builtins import input
import argparse
import atexit
import getpass
import ipaddress
import logging
import os.path
import requests
from time import sleep
from pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect
from pyVmomi import vim, vmodl
from vspk import v6 as vsdk
def get_args():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(description="Deploy a template into into a VM with certain Nuage VSP metadata.")
parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')
parser.add_argument('-f', '--folder', required=False, help='The folder in which the new VM should reside (default = same folder as source virtual machine)', dest='folder', type=str)
parser.add_argument('-l', '--log-file', required=False, help='File to log to (default = stdout)', dest='logfile', type=str)
parser.add_argument('-n', '--name', required=True, help='The name of the VM to be created', dest='name', type=str)
parser.add_argument('--nuage-enterprise', required=True, help='The enterprise with which to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)
parser.add_argument('--nuage-host', required=True, help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)
parser.add_argument('--nuage-port', required=False, help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)
parser.add_argument('--nuage-password', required=False, help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)
parser.add_argument('--nuage-user', required=True, help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)
parser.add_argument('--nuage-vm-enterprise', required=False, help='The Nuage enterprise to which the VM should be connected', dest='nuage_vm_enterprise', type=str)
parser.add_argument('--nuage-vm-domain', required=False, help='The Nuage domain to which the VM should be connected', dest='nuage_vm_domain', type=str)
parser.add_argument('--nuage-vm-zone', required=False, help='The Nuage zone to which the VM should be connected', dest='nuage_vm_zone', type=str)
parser.add_argument('--nuage-vm-subnet', required=False, help='The Nuage subnet to which the VM should be connected', dest='nuage_vm_subnet', type=str)
parser.add_argument('--nuage-vm-ip', required=False, help='The IP the VM should have', dest='nuage_vm_ip', type=str)
parser.add_argument('--nuage-vm-user', required=False, help='The Nuage User owning the VM', dest='nuage_vm_user', type=str)
parser.add_argument('-P', '--disable-power-on', required=False, help='Disable power on of cloned VMs', dest='nopoweron', action='store_true')
parser.add_argument('-r', '--resource-pool', required=False, help='The resource pool in which the new VM should reside, (default = Resources, the root resource pool)', dest='resource_pool', type=str, default='Resources')
parser.add_argument('-S', '--disable-SSL-certificate-verification', required=False, help='Disable SSL certificate verification on connect', dest='nosslcheck', action='store_true')
parser.add_argument('-t', '--template', required=True, help='Template to deploy', dest='template', type=str)
parser.add_argument('--vcenter-host', required=True, help='The vCenter or ESXi host to connect to', dest='vcenter_host', type=str)
parser.add_argument('--vcenter-port', required=False, help='vCenter Server port to connect to (default = 443)', dest='vcenter_port', type=int, default=443)
parser.add_argument('--vcenter-password', required=False, help='The password with which to connect to the vCenter host. If not specified, the user is prompted at runtime for a password', dest='vcenter_password', type=str)
parser.add_argument('--vcenter-user', required=True, help='The username with which to connect to the vCenter host', dest='vcenter_username', type=str)
parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
args = parser.parse_args()
return args
def clear(logger):
"""
Clears the terminal
"""
if logger:
logger.debug('Clearing terminal')
os.system(['clear', 'cls'][os.name == 'nt'])
def find_vm(vc, logger, name):
"""
Find a virtual machine by its name and return it
"""
content = vc.content
obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)
vm_list = obj_view.view
for vm in vm_list:
logger.debug('Checking virtual machine %s' % vm.name)
if vm.name == name:
logger.debug('Found virtual machine %s' % vm.name)
return vm
return None
def find_resource_pool(vc, logger, name):
"""
Find a resource pool by its name and return it
"""
content = vc.content
obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.ResourcePool], True)
rp_list = obj_view.view
for rp in rp_list:
logger.debug('Checking resource pool %s' % rp.name)
if rp.name == name:
logger.debug('Found resource pool %s' % rp.name)
return rp
return None
def find_folder(vc, logger, name):
"""
Find a folder by its name and return it
"""
content = vc.content
obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.Folder], True)
folder_list = obj_view.view
for folder in folder_list:
logger.debug('Checking folder %s' % folder.name)
if folder.name == name:
logger.debug('Found folder %s' % folder.name)
return folder
return None
def main():
"""
    Deploy a VM from a vCenter template and attach it to a Nuage network
"""
# Handling arguments
args = get_args()
debug = args.debug
folder_name = None
if args.folder:
folder_name = args.folder
log_file = None
if args.logfile:
log_file = args.logfile
name = args.name
nuage_enterprise = args.nuage_enterprise
nuage_host = args.nuage_host
nuage_port = args.nuage_port
nuage_password = None
if args.nuage_password:
nuage_password = args.nuage_password
nuage_username = args.nuage_username
nuage_vm_enterprise = None
if args.nuage_vm_enterprise:
nuage_vm_enterprise = args.nuage_vm_enterprise
nuage_vm_domain = None
if args.nuage_vm_domain:
nuage_vm_domain = args.nuage_vm_domain
nuage_vm_zone = None
if args.nuage_vm_zone:
nuage_vm_zone = args.nuage_vm_zone
nuage_vm_subnet = None
if args.nuage_vm_subnet:
nuage_vm_subnet = args.nuage_vm_subnet
nuage_vm_ip = None
if args.nuage_vm_ip:
nuage_vm_ip = args.nuage_vm_ip
nuage_vm_user = None
if args.nuage_vm_user:
nuage_vm_user = args.nuage_vm_user
power_on = not args.nopoweron
resource_pool_name = None
if args.resource_pool:
resource_pool_name = args.resource_pool
nosslcheck = args.nosslcheck
template = args.template
vcenter_host = args.vcenter_host
vcenter_port = args.vcenter_port
vcenter_password = None
if args.vcenter_password:
vcenter_password = args.vcenter_password
vcenter_username = args.vcenter_username
verbose = args.verbose
# Logging settings
if debug:
log_level = logging.DEBUG
elif verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)
logger = logging.getLogger(__name__)
# Getting user password for Nuage connection
if nuage_password is None:
logger.debug('No command line Nuage password received, requesting Nuage password from user')
nuage_password = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (nuage_host, nuage_username))
# Getting user password for vCenter connection
if vcenter_password is None:
logger.debug('No command line vCenter password received, requesting vCenter password from user')
vcenter_password = getpass.getpass(prompt='Enter password for vCenter host %s for user %s: ' % (vcenter_host, vcenter_username))
try:
vc = None
nc = None
# Connecting to Nuage
try:
logger.info('Connecting to Nuage server %s:%s with username %s' % (nuage_host, nuage_port, nuage_username))
            nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url="https://%s:%s" % (nuage_host, nuage_port))
nc.start()
except IOError:
pass
if not nc or not nc.is_current_session():
logger.error('Could not connect to Nuage host %s with user %s, enterprise %s and specified password' % (nuage_host, nuage_username, nuage_enterprise))
return 1
# Connecting to vCenter
try:
logger.info('Connecting to vCenter server %s:%s with username %s' % (vcenter_host, vcenter_port, vcenter_username))
if nosslcheck:
                vc = SmartConnectNoSSL(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_port))
else:
vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_port))
except IOError:
pass
if not vc:
logger.error('Could not connect to vCenter host %s with user %s and specified password' % (vcenter_host, vcenter_username))
return 1
logger.info('Connected to both Nuage & vCenter servers')
logger.debug('Registering vCenter disconnect at exit')
atexit.register(Disconnect, vc)
# Verifying the Nuage Enterprise existence or selecting it
if nuage_vm_enterprise:
logger.debug('Finding Nuage enterprise %s' % nuage_vm_enterprise)
vm_enterprise = nc.user.enterprises.get_first(filter="name == '%s'" % nuage_vm_enterprise)
if vm_enterprise is None:
logger.error('Unable to find Nuage enterprise %s' % nuage_vm_enterprise)
return 1
logger.info('Nuage enterprise %s found' % nuage_vm_enterprise)
else:
clear(logger)
print('Please select your enterprise:')
index = 0
all_ent = nc.user.enterprises.get()
for cur_ent in all_ent:
print('%s. %s' % (index + 1, cur_ent.name))
index += 1
vm_enterprise = None
while vm_enterprise is None:
                choice = input('Please enter the number of the enterprise [1-%s]: ' % len(all_ent))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_ent):
vm_enterprise = all_ent[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the Nuage User existence or selecting it
if nuage_vm_user:
logger.debug('Finding Nuage user %s' % nuage_vm_user)
vm_user = vm_enterprise.users.get_first(filter="userName == '%s'" % nuage_vm_user)
if vm_user is None:
logger.error('Unable to find Nuage user %s' % nuage_vm_user)
return 1
logger.info('Nuage user %s found' % nuage_vm_user)
else:
clear(logger)
print('Enterprise: %s' % vm_enterprise.name)
print(80 * '-')
print('Please select your user:')
index = 0
all_users = vm_enterprise.users.get()
for cur_user in all_users:
print('%s. %s' % (index + 1, cur_user.user_name))
index += 1
vm_user = None
while vm_user is None:
                choice = input('Please enter the number of the user [1-%s]: ' % len(all_users))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_users):
vm_user = all_users[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the Nuage Domain existence or selecting it
if nuage_vm_domain:
logger.debug('Finding Nuage domain %s' % nuage_vm_domain)
vm_domain = vm_enterprise.domains.get_first(filter="name == '%s'" % nuage_vm_domain)
if vm_domain is None:
logger.error('Unable to find Nuage domain %s' % nuage_vm_domain)
return 1
logger.info('Nuage domain %s found' % nuage_vm_domain)
else:
clear(logger)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
print(80 * '-')
print('Please select your domain:')
index = 0
all_dom = vm_enterprise.domains.get()
for cur_dom in all_dom:
print('%s. %s' % (index + 1, cur_dom.name))
index += 1
vm_domain = None
while vm_domain is None:
                choice = input('Please enter the number of the domain [1-%s]: ' % len(all_dom))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_dom):
vm_domain = all_dom[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the Nuage Zone existence or selecting it
if nuage_vm_zone:
logger.debug('Finding Nuage zone %s' % nuage_vm_zone)
vm_zone = vm_domain.zones.get_first(filter="name == '%s'" % nuage_vm_zone)
if vm_zone is None:
logger.error('Unable to find Nuage zone %s' % nuage_vm_zone)
return 1
logger.info('Nuage zone %s found' % nuage_vm_zone)
else:
clear(logger)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
print('Domain: %s' % vm_domain.name)
print(80 * '-')
print('Please select your zone:')
index = 0
all_zone = vm_domain.zones.get()
for cur_zone in all_zone:
print('%s. %s' % (index + 1, cur_zone.name))
index += 1
vm_zone = None
while vm_zone is None:
                choice = input('Please enter the number of the zone [1-%s]: ' % len(all_zone))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_zone):
vm_zone = all_zone[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the Nuage Subnet existence or selecting it
if nuage_vm_subnet:
logger.debug('Finding Nuage subnet %s' % nuage_vm_subnet)
vm_subnet = vm_zone.subnets.get_first(filter="name == '%s'" % nuage_vm_subnet)
if vm_subnet is None:
logger.error('Unable to find Nuage subnet %s' % nuage_vm_subnet)
return 1
logger.info('Nuage subnet %s found' % nuage_vm_subnet)
else:
clear(logger)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
print('Domain: %s' % vm_domain.name)
print('Zone: %s' % vm_zone.name)
print(80 * '-')
print('Please select your subnet:')
index = 0
all_subnet = vm_zone.subnets.get()
for cur_subnet in all_subnet:
print('%s. %s - %s/%s' % (index + 1, cur_subnet.name, cur_subnet.address, cur_subnet.netmask))
index += 1
vm_subnet = None
while vm_subnet is None:
                choice = input('Please enter the number of the subnet [1-%s]: ' % len(all_subnet))
choice = int(choice)
if choice > 0 and choice - 1 < len(all_subnet):
vm_subnet = all_subnet[choice - 1]
break
print('Invalid choice, please try again')
# Verifying the IP or asking for it
if nuage_vm_ip:
logger.debug('Verifying if IP %s is inside Nuage subnet %s range' % (nuage_vm_ip, vm_subnet.name))
if not ipaddress.ip_address(nuage_vm_ip) in ipaddress.ip_network('%s/%s' % (vm_subnet.address, vm_subnet.netmask)):
logger.error('IP %s is not part of subnet %s with netmask %s' % (nuage_vm_ip, vm_subnet.address, vm_subnet.netmask))
return 1
vm_ip = nuage_vm_ip
else:
clear(logger)
print('Enterprise: %s' % vm_enterprise.name)
print('User: %s' % vm_user.user_name)
print('Domain: %s' % vm_domain.name)
print('Zone: %s' % vm_zone.name)
print('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
print(80 * '-')
print('If you want a static IP, please enter it. Or press enter for a DHCP assigned IP.')
vm_ip = None
while vm_ip is None:
                choice = input('Please enter the IP or press enter for a DHCP assigned IP: ')
if not choice or ipaddress.ip_address(choice) in ipaddress.ip_network('%s/%s' % (vm_subnet.address, vm_subnet.netmask)):
vm_ip = choice
break
print('Invalid choice, please try again')
logger.info('Using following Nuage values:')
logger.info('Enterprise: %s' % vm_enterprise.name)
logger.info('User: %s' % vm_user.user_name)
logger.info('Domain: %s' % vm_domain.name)
logger.info('Zone: %s' % vm_zone.name)
logger.info('Subnet: %s - %s/%s' % (vm_subnet.name, vm_subnet.address, vm_subnet.netmask))
if vm_ip:
logger.info('Static IP: %s' % vm_ip)
# Find the correct VM
logger.debug('Finding template %s' % template)
template_vm = find_vm(vc, logger, template)
if template_vm is None:
logger.error('Unable to find template %s' % template)
return 1
logger.info('Template %s found' % template)
# Find the correct Resource Pool
resource_pool = None
if resource_pool_name is not None:
logger.debug('Finding resource pool %s' % resource_pool_name)
resource_pool = find_resource_pool(vc, logger, resource_pool_name)
if resource_pool is None:
logger.critical('Unable to find resource pool %s' % resource_pool_name)
return 1
logger.info('Resource pool %s found' % resource_pool_name)
# Find the correct folder
folder = None
if folder_name is not None:
logger.debug('Finding folder %s' % folder_name)
folder = find_folder(vc, logger, folder_name)
if folder is None:
logger.critical('Unable to find folder %s' % folder_name)
return 1
logger.info('Folder %s found' % folder_name)
else:
logger.info('Setting folder to template folder as default')
folder = template_vm.parent
# Creating necessary specs
logger.debug('Creating relocate spec')
if resource_pool is not None:
            logger.debug('Resource pool found, using it')
relocate_spec = vim.vm.RelocateSpec(pool=resource_pool)
else:
logger.debug('No resource pool found, continuing without it')
relocate_spec = vim.vm.RelocateSpec()
logger.debug('Creating clone spec')
clone_spec = vim.vm.CloneSpec(powerOn=False, template=False, location=relocate_spec)
run_loop = True
vm = None
logger.info('Trying to clone %s to new virtual machine' % template)
if find_vm(vc, logger, name):
logger.warning('Virtual machine already exists, not creating')
run_loop = False
else:
logger.debug('Creating clone task')
task = template_vm.Clone(name=name, folder=folder, spec=clone_spec)
logger.info('Cloning task created')
logger.info('Checking task for completion. This might take a while')
while run_loop:
info = task.info
logger.debug('Checking clone task')
if info.state == vim.TaskInfo.State.success:
logger.info('Cloned and running')
vm = info.result
run_loop = False
break
elif info.state == vim.TaskInfo.State.running:
logger.debug('Cloning task is at %s percent' % info.progress)
elif info.state == vim.TaskInfo.State.queued:
logger.debug('Cloning task is queued')
elif info.state == vim.TaskInfo.State.error:
if info.error.fault:
logger.info('Cloning task has quit with error: %s' % info.error.fault.faultMessage)
else:
                    logger.info('Cloning task has quit with cancellation')
run_loop = False
break
logger.debug('Sleeping 10 seconds for new check')
sleep(10)
# If the VM does not exist, cloning failed and the script is terminated
if not vm:
logger.error('Clone failed')
return 1
# Setting Nuage metadata
logger.info('Setting Nuage Metadata')
vm_option_values = []
# Enterprise
vm_option_values.append(vim.option.OptionValue(key='nuage.enterprise', value=vm_enterprise.name))
# Domain
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.domain', value=vm_domain.name))
# Zone
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.zone', value=vm_zone.name))
# Subnet
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.network', value=vm_subnet.name))
# Network type
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.networktype', value='ipv4'))
# User
vm_option_values.append(vim.option.OptionValue(key='nuage.user', value=vm_user.user_name))
# IP
if vm_ip:
vm_option_values.append(vim.option.OptionValue(key='nuage.nic0.ip', value=vm_ip))
        logger.debug('Creating config spec for VM')
config_spec = vim.vm.ConfigSpec(extraConfig=vm_option_values)
logger.info('Applying advanced parameters. This might take a couple of seconds')
config_task = vm.ReconfigVM_Task(spec=config_spec)
        logger.debug('Waiting for the advanced parameters to be applied')
run_loop = True
while run_loop:
info = config_task.info
if info.state == vim.TaskInfo.State.success:
logger.debug('Advanced parameters applied')
run_loop = False
break
elif info.state == vim.TaskInfo.State.error:
if info.error.fault:
logger.info('Applying advanced parameters has quit with error: %s' % info.error.fault.faultMessage)
else:
                    logger.info('Applying advanced parameters has quit with cancellation')
run_loop = False
break
sleep(5)
if power_on:
logger.info('Powering on VM. This might take a couple of seconds')
power_on_task = vm.PowerOn()
            logger.debug('Waiting for VM to power on')
run_loop = True
while run_loop:
info = power_on_task.info
if info.state == vim.TaskInfo.State.success:
run_loop = False
break
elif info.state == vim.TaskInfo.State.error:
if info.error.fault:
logger.info('Power on has quit with error: %s' % info.error.fault.faultMessage)
else:
                        logger.info('Power on has quit with cancellation')
run_loop = False
break
sleep(5)
except vmodl.MethodFault as e:
logger.critical('Caught vmodl fault: %s' % e.msg)
return 1
except Exception as e:
logger.critical('Caught exception: %s' % str(e))
return 1
# Start program
if __name__ == "__main__":
main()
```
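The script above polls `task.info` in three separate hand-rolled loops (clone, reconfigure, power on). A minimal sketch of a reusable pyVmomi task poller is below; the helper name and the five-second default interval are assumptions, not part of the original script.
```python
# Hypothetical helper, sketched from the three polling loops above.
from time import sleep

from pyVmomi import vim


def wait_for_task(task, logger, interval=5):
    """Poll a vSphere task until it finishes; return its result or None."""
    while True:
        info = task.info
        if info.state == vim.TaskInfo.State.success:
            return info.result
        if info.state == vim.TaskInfo.State.error:
            # info.error is a vmodl.MethodFault describing the failure
            logger.error('Task failed: %s' % info.error.msg)
            return None
        logger.debug('Task at %s percent, sleeping %s seconds' % (info.progress, interval))
        sleep(interval)
```
With a helper like this, each of the three loops collapses to a single call, e.g. `vm = wait_for_task(task, logger)`.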
#### File: vspk-examples/python/generic_network_provisionning.py
```python
from __future__ import print_function
from builtins import next
from builtins import range
import ipaddress
from vspk import v6 as vsdk
def populate_test_domain(domain, number_of_zones, number_of_subnets_per_zone, number_of_vports_per_subnet):
""" Populate a domain with test data
Args:
domain (vsdk.NUDomain | vsdk.NUDomainTemplate): base domain to populate
number_of_zones (int): number of desired zones
number_of_subnets_per_zone (int): number of desired subnets per zone
number_of_vports_per_subnet (int): number of desired vports per subnet (only available if domain is not a template)
"""
# check if the domain is a template
# if so use children template classes instead of instances
is_template = domain.is_template()
zone_class = vsdk.NUZoneTemplate if is_template else vsdk.NUZone
subnet_class = vsdk.NUSubnetTemplate if is_template else vsdk.NUSubnet
# generate a network and subnets
network = ipaddress.ip_network(u'10.0.0.0/8')
subnets = network.subnets(new_prefix=24)
# create zones
for i in range(0, number_of_zones):
zone = zone_class(name="Zone %d" % i)
domain.create_child(zone)
domain.add_child(zone)
        # create subnets
for j in range(0, number_of_subnets_per_zone):
# pull a subnet and get information about it
subnetwork = next(subnets)
ip = "%s" % subnetwork.network_address
gw = "%s" % next(subnetwork.hosts())
nm = "%s" % subnetwork.netmask
subnet = subnet_class(name="Subnet %d %d" % (i, j), address=ip, netmask=nm, gateway=gw)
zone.create_child(subnet)
zone.add_child(subnet)
# if the given domain is a template, we stop
if is_template:
break
# Otherwise we create the VPorts
for k in range(0, number_of_vports_per_subnet):
vport = vsdk.NUVPort(name="VPort %d-%d-%d" % (i, j, k), type="VM", address_spoofing="INHERITED", multicast="INHERITED")
subnet.create_child(vport)
subnet.add_child(vport)
if __name__ == "__main__":
session = vsdk.NUVSDSession(username='csproot', password='<PASSWORD>', enterprise='csp', api_url='https://localhost:8443')
session.start()
# get a domain
domain = vsdk.NUDomain(id="97c9ffac-c007-4cef-bb38-69aa91f7c258")
domain.fetch()
# do the job
populate_test_domain(domain, 3, 4, 5)
from time import sleep
print("Sleeping...")
sleep(6)
for zone in domain.zones:
zone.delete()
```
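The subnet carving in `populate_test_domain` leans on the fact that `ip_network(...).subnets(new_prefix=24)` returns a lazy generator, so each `next(subnets)` yields the following /24. A quick standard-library check:
```python
# Stdlib-only check of the subnet carving used above.
import ipaddress

network = ipaddress.ip_network(u'10.0.0.0/8')
subnets = network.subnets(new_prefix=24)
first = next(subnets)   # 10.0.0.0/24
second = next(subnets)  # 10.0.1.0/24
print(first.network_address, next(first.hosts()), first.netmask)
# 10.0.0.0 10.0.0.1 255.255.255.0
```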
#### File: vspk-examples/python/nuage_acl_learner.py
```python
from future import standard_library
standard_library.install_aliases()
from builtins import str
import argparse
import getpass
import logging
import re
import socketserver
import time
from vspk import v6 as vsdk
# Global variables
nc = None
nc_enterprise = None
nc_domain = None
nc_subnetmap = {}
nc_policygroupmap = {}
nc_vportmap = {}
nc_networkmacromap = {}
ingress_learning_acl = None
egress_learning_acl = None
logger = None
configuration = {}
flows = {}
ip_regex = re.compile(r'.*dir: (\w+).*ipv4\(src=([\d\.]+)[^,]*,dst=([\d\.]+)[^,]*,proto=(\w+).*')
traffic_regex = re.compile(r'.*(tcp|udp)\(src=(\d+)[^,]*,dst=(\d+)[^\)]*\).*')
class ACLTCPHandler(socketserver.StreamRequestHandler):
"""
Will handle ACL log messages and create appropriate ACLs
"""
def handle(self):
global flows, nc_networkmacromap, configuration
        data = self.rfile.readline().strip().decode('utf-8')
logger.debug('Received message from %s: %s' % (self.client_address[0], data))
# Parsing message
ip_matches = ip_regex.match(data)
if ip_matches is None:
logger.debug('No valid stream found')
return 0
flow_matches = traffic_regex.match(data)
if flow_matches is None:
logger.debug('No valid TCP/UDP stream found')
return 0
stream_type = flow_matches.group(1)
stream_direction = ip_matches.group(1)
stream_src_ip = ip_matches.group(2)
stream_src_port = flow_matches.group(2)
stream_dst_ip = ip_matches.group(3)
stream_dst_port = flow_matches.group(3)
stream_protocol = ip_matches.group(4)
logger.debug('Found %s stream: direction %s - source ip %s - source port %s - destination ip %s - destination port %s - protocol %s' % (stream_type, stream_direction, stream_src_ip, stream_src_port, stream_dst_ip, stream_dst_port, stream_protocol))
if configuration['strictsource']:
flow_id = '%s_%s_%s_%s_%s' % (stream_type, stream_src_ip, stream_src_port, stream_dst_ip, stream_dst_port)
else:
flow_id = '%s_%s_%s_%s' % (stream_type, stream_src_ip, stream_dst_ip, stream_dst_port)
stream_src_port = '*'
if flow_id in flows:
logger.info('ACL already exists in the known flows, skipping handling it.')
return 0
src_vport = None
dst_vport = None
src_subnet = None
dst_subnet = None
src_pg = None
dst_pg = None
dst_nm = None
if stream_src_ip in nc_vportmap:
src_vport = nc_vportmap[stream_src_ip]
logger.debug('Found source vPort for IP %s with MAC %s' % (stream_src_ip, src_vport['mac']))
if configuration['acl_type'] == 'SUBNET' or configuration['acl_type'] == 'ZONE':
src_subnet = nc_subnetmap[src_vport['subnet']]
logger.debug('Found source subnet for IP %s: %s-%s' % (stream_src_ip, src_subnet['address'], src_subnet['netmask']))
if configuration['acl_type'] == 'POLICYGROUP':
                if len(src_vport['policygroups']) > 0:
src_pg = src_vport['policygroups'][0]
logger.debug('Found source Policy Group %s for IP %s' % (src_pg['name'], stream_src_ip))
else:
                    logger.error('Source vPort with IP %s does not have a Policy Group assigned, cannot create ACL rules' % stream_src_ip)
return 1
else:
logger.error('Unknown vPort for source IP %s, skipping this flow' % stream_src_ip)
return 1
if stream_dst_ip in nc_vportmap:
dst_vport = nc_vportmap[stream_dst_ip]
logger.debug('Found destination vPort for IP %s with MAC %s' % (stream_dst_ip, dst_vport['mac']))
if configuration['acl_type'] == 'SUBNET' or configuration['acl_type'] == 'ZONE':
dst_subnet = nc_subnetmap[dst_vport['subnet']]
logger.debug('Found destination subnet for IP %s: %s-%s' % (stream_dst_ip, dst_subnet['address'], dst_subnet['netmask']))
if configuration['acl_type'] == 'POLICYGROUP':
                if len(dst_vport['policygroups']) > 0:
dst_pg = dst_vport['policygroups'][0]
logger.debug('Found destination Policy Group %s for IP %s' % (dst_pg['name'], stream_dst_ip))
else:
                    logger.error('Destination vPort with IP %s does not have a Policy Group assigned, cannot create ACL rules' % stream_dst_ip)
return 1
elif '%s-255.255.255.255' % stream_dst_ip in nc_networkmacromap:
logger.debug('vPort for destination IP %s does not exist, using existing /32 Network Macro' % stream_dst_ip)
dst_nm = nc_networkmacromap['%s-255.255.255.255' % stream_dst_ip]
logger.debug('Found destination network macro for IP %s' % stream_dst_ip)
else:
logger.debug('vPort or Network Macro for destination IP %s does not exist, creating a /32 Network Macro' % stream_dst_ip)
            temp_nm_name = ('%s-255.255.255.255' % stream_dst_ip).replace('.', '_')
temp_nm = vsdk.NUEnterpriseNetwork(
name=temp_nm_name,
address=stream_dst_ip,
netmask='255.255.255.255'
)
nc_enterprise.create_child(temp_nm)
logger.info('Created new Network Macro for destination IP %s' % stream_dst_ip)
dst_nm = {
'id': temp_nm.id,
'address': stream_dst_ip,
'netmask': '255.255.255.255'
}
nc_networkmacromap['%s-255.255.255.255' % stream_dst_ip] = dst_nm
src_type = None
src_id = None
if configuration['acl_type'] == 'ZONE':
src_type = 'ZONE'
src_id = src_subnet['zone']
elif configuration['acl_type'] == 'SUBNET':
src_type = 'SUBNET'
src_id = src_subnet['id']
elif configuration['acl_type'] == 'POLICYGROUP':
src_type = 'POLICYGROUP'
src_id = src_pg['id']
dst_type = None
dst_id = None
if dst_vport is not None and configuration['acl_type'] == 'ZONE':
dst_type = 'ZONE'
dst_id = dst_subnet['zone']
elif dst_vport is not None and configuration['acl_type'] == 'SUBNET':
dst_type = 'SUBNET'
dst_id = dst_subnet['id']
elif dst_vport is not None and configuration['acl_type'] == 'POLICYGROUP':
dst_type = 'POLICYGROUP'
dst_id = dst_pg['id']
else:
dst_type = 'ENTERPRISE_NETWORK'
dst_id = dst_nm['id']
stream_protocol = '17'
if stream_type == 'tcp':
stream_protocol = '6'
logger.debug('Creating new Ingress ACL rule with values: action FORWARD - ether_type 0x0800 - location_type %s - location_id %s - network_type %s - network_id %s - protocol %s - source_port %s - destination_port %s - dscp * - reflexive True - priority %s' % (src_type, src_id, dst_type, dst_id, stream_protocol, stream_src_port, stream_dst_port, configuration['next_priority']))
ingress_acl_entry = vsdk.NUIngressACLEntryTemplate(
action='FORWARD',
description='Learned - %s %s:%s to %s:%s' % (stream_type, stream_src_ip, stream_src_port, stream_dst_ip, stream_dst_port),
ether_type='0x0800',
location_type=src_type,
location_id=src_id,
network_type=dst_type,
network_id=dst_id,
protocol=stream_protocol,
source_port=stream_src_port,
destination_port=stream_dst_port,
dscp='*',
reflexive=True,
priority=configuration['next_priority']
)
# For now we work without jobs, way easier...
ingress_learning_acl.create_child(ingress_acl_entry, as_async=False)
flows[flow_id] = {
'action': 'FORWARD',
'description': 'Learned - %s %s:%s to %s:%s' % (stream_type, stream_src_ip, stream_src_port, stream_dst_ip, stream_dst_port),
'ether_type': '0x0800',
'location_type': src_type,
'location_id': src_id,
'network_type': dst_type,
'network_id': dst_id,
'protocol': stream_protocol,
'source_port': stream_src_port,
'destination_port': stream_dst_port,
'dscp': '*',
'reflexive': True,
'priority': configuration['next_priority']
}
configuration['next_priority'] += 1
return 0
def get_args():
"""
Supports the command-line arguments listed below.
"""
    parser = argparse.ArgumentParser(description="Tool which will create ACLs learned from flow logs from the VRS. It will actively listen to incoming syslog connections on port 514.")
parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')
parser.add_argument('-f', '--first-priority', required=False, help='The priority of the first created rule (will be incremented for each next rule), default is 100', dest='first_priority', type=int, default=100)
parser.add_argument('-l', '--log-file', required=False, help='File to log to (default = stdout)', dest='logfile', type=str)
parser.add_argument('-D', '--nuage-domain', required=True, help='The domain to investigate and set ACLs on', dest='nuage_domain', type=str)
parser.add_argument('-E', '--nuage-enterprise', required=True, help='The enterprise with which to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)
parser.add_argument('-H', '--nuage-host', required=True, help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)
parser.add_argument('-P', '--nuage-port', required=False, help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)
parser.add_argument('-p', '--nuage-password', required=False, help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)
parser.add_argument('-u', '--nuage-user', required=True, help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)
parser.add_argument('-S', '--disable-SSL-certificate-verification', required=False, help='Disable SSL certificate verification on connect (deprecated)', dest='nosslcheck', action='store_true')
parser.add_argument('-s', '--strict-source-ports', required=False, help='Use strict source ports, this will set the specific source port instead of the default * setting for Ingress rules.', dest='strictsource', action='store_true')
parser.add_argument('-t', '--type', required=True, help='On what entity type should the ACLs be applied. Valid responses: POLICYGROUP, ZONE, SUBNET', dest='acl_type', type=str, choices=['POLICYGROUP', 'ZONE', 'SUBNET'])
parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
args = parser.parse_args()
return args
def wait_for_job(parent, job):
logger.debug('Creating Job with command %s' % job.command)
parent.create_child(job)
while True:
logger.debug('Fetching update on the job with command %s' % job.command)
job.fetch()
if job.status == 'SUCCESS':
            logger.debug('Job with command %s executed successfully returning result %s' % (job.command, job.result))
return job.result
elif job.status != 'RUNNING':
logger.error('Job with command %s failed, status is %s, returning False' % (job.command, job.status))
return False
time.sleep(1)
def main():
"""
    Main function: learn ACLs from VRS flow logs and apply them to the Nuage domain
"""
global logger, configuration, nc, nc_enterprise, nc_domain, nc_subnetmap, nc_policygroupmap, nc_vportmap, nc_networkmacromap, ingress_learning_acl, egress_learning_acl
# Handling arguments
args = get_args()
configuration['debug'] = args.debug
configuration['next_priority'] = args.first_priority
configuration['log_file'] = None
if args.logfile:
configuration['log_file'] = args.logfile
configuration['nuage_domain'] = args.nuage_domain
configuration['nuage_enterprise'] = args.nuage_enterprise
configuration['nuage_host'] = args.nuage_host
configuration['nuage_port'] = args.nuage_port
configuration['nuage_password'] = None
if args.nuage_password:
configuration['nuage_password'] = args.nuage_password
configuration['nuage_username'] = args.nuage_username
configuration['strictsource'] = args.strictsource
configuration['nosslcheck'] = args.nosslcheck
configuration['acl_type'] = args.acl_type
configuration['verbose'] = args.verbose
# Logging settings
if configuration['debug']:
log_level = logging.DEBUG
elif configuration['verbose']:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(filename=configuration['log_file'], format='%(asctime)s %(levelname)s %(message)s', level=log_level)
logger = logging.getLogger(__name__)
# Getting user password for Nuage connection
if configuration['nuage_password'] is None:
logger.debug('No command line Nuage password received, requesting Nuage password from user')
configuration['nuage_password'] = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (configuration['nuage_host'], configuration['nuage_username']))
try:
# Connecting to Nuage
logger.info('Connecting to Nuage server %s:%s with username %s' % (configuration['nuage_host'], configuration['nuage_port'], configuration['nuage_username']))
nc = vsdk.NUVSDSession(username=configuration['nuage_username'], password=configuration['nuage_password'], enterprise=configuration['nuage_enterprise'], api_url="https://%s:%s" % (configuration['nuage_host'], configuration['nuage_port']))
nc.start()
except Exception as e:
logger.error('Could not connect to Nuage host %s with user %s and specified password' % (configuration['nuage_host'], configuration['nuage_username']))
logger.critical('Caught exception: %s' % str(e))
return 1
# Finding domain
logger.debug('Finding domain %s' % configuration['nuage_domain'])
nc_domain = nc.user.domains.get_first(filter="name == '%s'" % configuration['nuage_domain'])
if nc_domain is None:
logger.critical('Unable to find domain %s, quiting' % configuration['nuage_domain'])
return 1
logger.info('Found domain %s' % nc_domain.name)
# Getting enterprise
logger.debug('Getting enterprise for domain %s' % nc_domain.name)
nc_enterprise = vsdk.NUEnterprise(id=nc_domain.parent_id)
nc_enterprise.fetch()
if configuration['acl_type'] == 'SUBNET' or configuration['acl_type'] == 'ZONE':
# Mapping subnets
logger.debug('Mapping subnets for domain %s' % nc_domain.name)
for nc_subnet in nc_domain.subnets.get():
logger.debug('Found subnet with network %s/%s in domain %s' % (nc_subnet.address, nc_subnet.netmask, nc_domain.name))
nc_subnetmap[nc_subnet.id] = {
'id': nc_subnet.id,
'address': nc_subnet.address,
'netmask': nc_subnet.netmask,
'zone': nc_subnet.parent_id
}
if configuration['acl_type'] == 'POLICYGROUP':
# Mapping policy groups
logger.debug('Mapping policy groups for domain %s' % nc_domain.name)
for nc_policygroup in nc_domain.policy_groups.get():
logger.debug('Found policy group %s in domain %s' % (nc_policygroup.name, nc_domain.name))
nc_policygroupmap[nc_policygroup.id] = {
'id': nc_policygroup.id,
'name': nc_policygroup.name
}
# Mapping vPorts
logger.debug('Mapping vPorts for domain %s' % nc_domain.name)
for nc_vport in nc_domain.vports.get():
logger.debug('Found vPort with IP %s and MAC %s in domain %s' % (nc_vport.vm_interfaces.get_first().ip_address, nc_vport.vm_interfaces.get_first().mac, nc_domain.name))
nc_vportmap[nc_vport.vm_interfaces.get_first().ip_address] = {
'id': nc_vport.id,
'mac': nc_vport.vm_interfaces.get_first().mac,
'subnet': nc_vport.parent_id,
'policygroups': []
}
for nc_policygroup in nc_vport.policy_groups.get():
logger.debug('Found policy group %s for vPort with %s and MAC %s in domain %s' % (nc_policygroup.name, nc_vport.vm_interfaces.get_first().ip_address, nc_vport.vm_interfaces.get_first().mac, nc_domain.name))
nc_vportmap[nc_vport.vm_interfaces.get_first().ip_address]['policygroups'].append({
'id': nc_policygroup.id,
'name': nc_policygroup.name
})
# Mapping Network Macros
logger.debug('Mapping Network Macros for enterprise %s' % nc_enterprise.name)
for nc_networkmacro in nc_enterprise.enterprise_networks.get():
logger.debug('Found Network Macro with IP %s and netmask %s for Enterprise %s' % (nc_networkmacro.address, nc_networkmacro.netmask, nc_enterprise.name))
nc_networkmacromap['%s-%s' % (nc_networkmacro.address, nc_networkmacro.netmask)] = {
'id': nc_networkmacro.id,
'address': nc_networkmacro.address,
'netmask': nc_networkmacro.netmask
}
# Checking if ACL logging rules are present
ingress_learning_acl = nc_domain.ingress_acl_templates.get_first(filter="name == 'Ingress Learning ACLs'")
egress_learning_acl = nc_domain.egress_acl_templates.get_first(filter="name == 'Egress Learning ACLs'")
if ingress_learning_acl is None:
logger.info('Creating Ingress Learning ACLs')
ingress_learning_acl = vsdk.NUIngressACLTemplate(
name='Ingress Learning ACLs',
priority_type='NONE',
priority=100,
default_allow_non_ip=False,
default_allow_ip=False,
allow_l2_address_spoof=False,
active=True
)
nc_domain.create_child(ingress_learning_acl, as_async=False)
logger.debug('Creating Ingress ACL TCP rule')
ingress_acl_entry_1 = vsdk.NUIngressACLEntryTemplate(
action='FORWARD',
description='Learning ACL for TCP traffic',
ether_type='0x0800',
flow_logging_enabled=True,
location_type='ANY',
network_type='ANY',
priority=1000,
protocol=6,
reflexive=True,
source_port='*',
destination_port='*',
dscp='*'
)
ingress_learning_acl.create_child(ingress_acl_entry_1, as_async=False)
logger.debug('Creating Ingress ACL UDP rule')
ingress_acl_entry_2 = vsdk.NUIngressACLEntryTemplate(
action='FORWARD',
description='Learning ACL for UDP traffic',
ether_type='0x0800',
flow_logging_enabled=True,
location_type='ANY',
network_type='ANY',
priority=1001,
protocol=17,
reflexive=True,
source_port='*',
destination_port='*',
dscp='*'
)
ingress_learning_acl.create_child(ingress_acl_entry_2, as_async=False)
logger.debug('Creating Ingress ACL other rule')
ingress_acl_entry_3 = vsdk.NUIngressACLEntryTemplate(
action='FORWARD',
description='Learning ACL for other traffic',
ether_type='0x0800',
flow_logging_enabled=True,
location_type='ANY',
network_type='ANY',
priority=1002,
protocol='ANY',
source_port=None,
destination_port=None,
dscp='*'
)
ingress_learning_acl.create_child(ingress_acl_entry_3, as_async=False)
logger.info('Ingress ACL rules created')
if egress_learning_acl is None:
logger.info('Creating Egress Learning ACLs')
egress_learning_acl = vsdk.NUEgressACLTemplate(
name='Egress Learning ACLs',
priority_type='NONE',
priority=100,
default_allow_non_ip=False,
default_allow_ip=False,
default_install_acl_implicit_rules=True,
active=True
)
nc_domain.create_child(egress_learning_acl, as_async=False)
logger.debug('Creating Egress ACL TCP rule')
egress_acl_entry_1 = vsdk.NUEgressACLEntryTemplate(
action='FORWARD',
description='ACL for TCP traffic',
ether_type='0x0800',
flow_logging_enabled=True,
location_type='ANY',
network_type='ANY',
priority=1000,
protocol=6,
reflexive=True,
source_port='*',
destination_port='*',
dscp='*'
)
egress_learning_acl.create_child(egress_acl_entry_1, as_async=False)
logger.debug('Creating Egress ACL UDP rule')
egress_acl_entry_2 = vsdk.NUEgressACLEntryTemplate(
action='FORWARD',
description='ACL for UDP traffic',
ether_type='0x0800',
flow_logging_enabled=True,
location_type='ANY',
network_type='ANY',
priority=1001,
protocol=17,
reflexive=True,
source_port='*',
destination_port='*',
dscp='*'
)
egress_learning_acl.create_child(egress_acl_entry_2, as_async=False)
logger.debug('Creating Egress ACL other rule')
egress_acl_entry_3 = vsdk.NUEgressACLEntryTemplate(
action='FORWARD',
description='ACL for other traffic',
ether_type='0x0800',
flow_logging_enabled=True,
location_type='ANY',
network_type='ANY',
priority=1002,
protocol='ANY',
source_port=None,
destination_port=None,
dscp='*'
)
egress_learning_acl.create_child(egress_acl_entry_3, as_async=False)
logger.info('Egress ACL rules created')
logger.info('Starting capture server on port 514')
capture_server = socketserver.TCPServer(('0.0.0.0', 514), ACLTCPHandler)
try:
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
capture_server.serve_forever()
except KeyboardInterrupt:
logger.info('Received interrupt, finishing up')
capture_server.shutdown()
logger.info('All done!')
    return 0
# Start program
if __name__ == "__main__":
main()
```
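Everything the handler does hinges on `ip_regex` and `traffic_regex`. A self-contained check of both is sketched below; the sample line is an assumption about the VRS/OVS flow-log format, shaped only to exercise the captured fields.
```python
# Standalone check of the two regexes above; the sample line is illustrative.
import re

ip_regex = re.compile(r'.*dir: (\w+).*ipv4\(src=([\d\.]+)[^,]*,dst=([\d\.]+)[^,]*,proto=(\w+).*')
traffic_regex = re.compile(r'.*(tcp|udp)\(src=(\d+)[^,]*,dst=(\d+)[^\)]*\).*')

sample = 'flow dir: in ipv4(src=10.10.0.5,dst=10.10.1.9,proto=6) tcp(src=51512,dst=80)'
print(ip_regex.match(sample).groups())       # ('in', '10.10.0.5', '10.10.1.9', '6')
print(traffic_regex.match(sample).groups())  # ('tcp', '51512', '80')
```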
#### File: vspk-examples/python/show_pushcenter_notifications.py
```python
import logging
from time import sleep
from vspk import v6 as vsdk
from vspk.utils import set_log_level
set_log_level(logging.ERROR)
def did_receive_push(data):
""" Receive delegate
"""
import pprint
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(data)
if __name__ == '__main__':
import sys
sys.setrecursionlimit(50)
# create a user session for user csproot
session = vsdk.NUVSDSession(username="csproot", password="<PASSWORD>", enterprise="csp", api_url="https://localhost:8443")
# start the session
# now session contains a push center and the connected user
session.start()
session.reset()
session.start()
# we get the push center from the session
push_center = session.push_center
# we register our delegate that will be called on each event
push_center.add_delegate(did_receive_push)
# and we start it
push_center.start()
    # then we do nothing, welcome to the marvelous world of async programming ;)
while True:
sleep(10000)
``` |
{
"source": "jml/edn-profiling",
"score": 3
} |
#### File: jml/edn-profiling/gen-much-edn.py
```python
import datetime
import decimal
import random
import sys
import edn
DICTIONARY_FILE = '/usr/share/dict/words'
def load_words(dictionary):
with open(dictionary, 'r') as dictionary_file:
return [x.strip() for x in dictionary_file.readlines()]
WORDS = load_words(DICTIONARY_FILE)
def random_words(n):
for i in range(n):
word = random.choice(WORDS)
try:
yield word.decode('ascii')
except UnicodeDecodeError:
continue
def random_decimal():
value = random.randint(-500000, 500000) / 100.0
return decimal.Decimal(value).quantize(decimal.Decimal('0.01'))
def random_day():
return datetime.date(2013, 1, 1) + datetime.timedelta(random.randint(0, 365))
def make_element():
return {edn.Keyword('description'): ' '.join(random_words(3)),
edn.Keyword('amount'): random_decimal(),
edn.Keyword('date'): random_day()}
num = int(sys.argv[1])
for i in range(num):
    print(edn.dumps(
        make_element(),
        [(datetime.date, edn.Symbol('day'), lambda x: x.strftime('%Y-%m-%d'))],
    ))
``` |
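`random_decimal` above builds a `Decimal` from a float and then snaps it to two places with `quantize`. A quick standard-library illustration of that rounding step:
```python
# Stdlib-only illustration of the quantize() call in random_decimal().
import decimal

value = 123456 / 100.0  # 1234.56 as a float
print(decimal.Decimal(value).quantize(decimal.Decimal('0.01')))  # 1234.56
```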
{
"source": "jmlemetayer/inotify_watcher",
"score": 3
} |
#### File: jmlemetayer/inotify_watcher/example.py
```python
from __future__ import annotations
import logging
import sys
import tempfile
from inotify_watcher import InotifyWatcher
logging.basicConfig(level=logging.DEBUG)
def main() -> int:
"""Show the basic usage of the `InotifyWatcher` class.
    This function creates a temporary directory and then prints every
    inotify event received.
"""
with tempfile.TemporaryDirectory() as watched_dir:
try:
watcher = InotifyWatcher(
watched_dir,
file_watched=lambda p: print(f"file {p} watched"),
file_created=lambda p: print(f"file {p} created"),
file_updated=lambda p: print(f"file {p} updated"),
file_modified=lambda p: print(f"file {p} modified"),
file_moved=lambda p, n: print(f"file {p} moved to {n}"),
file_deleted=lambda p: print(f"file {p} deleted"),
file_gone=lambda p: print(f"file {p} gone"),
dir_watched=lambda p: print(f"directory {p} watched"),
dir_created=lambda p: print(f"directory {p} created"),
dir_updated=lambda p: print(f"directory {p} updated"),
dir_moved=lambda p, n: print(f"directory {p} moved to {n}"),
dir_deleted=lambda p: print(f"directory {p} deleted"),
dir_gone=lambda p: print(f"directory {p} gone"),
)
print(f"watched directory: {watched_dir}")
watcher.wait()
except KeyboardInterrupt:
watcher.close()
return 0
if __name__ == "__main__":
sys.exit(main())
```
#### File: inotify_watcher/tests/__init__.py
```python
from __future__ import annotations
import logging
import pathlib
from typing import Any
import inotify_simple
logger = logging.getLogger(__name__)
class InotifyEventTest:
"""A wrapper of the `inotify_simple.Event`.
This wrapper is used to add some extra features to the
`inotify_simple.Event`.
See Also
--------
inotify_simple.Event
"""
def __init__(
self, event: inotify_simple.Event, wd_paths: dict[int | str, pathlib.Path]
) -> None:
"""Create the `inotify_simple.Event` wrapper object.
Parameters
----------
event: inotify_simple.Event
The original inotify event object.
        wd_paths: dict[int | str, pathlib.Path]
The watch descriptor vs path lookup table.
The key represent the watch descriptor (`int`) and the value the
path (`pathlib.Path`).
If available the watch descriptor ``"root"`` represents the root
path to allow the pretty print to use a relative path.
"""
self.__event = event
self.__wd_paths = wd_paths
self.wd = self.__event.wd
self.mask = self.__event.mask
self.cookie = self.__event.cookie
self.name = self.__event.name or None
self.path = self.__wd_paths[self.wd]
self.flags = [f.name for f in inotify_simple.flags.from_mask(self.mask)]
def __str__(self) -> str:
"""Pretty print the object.
Returns
-------
object_string: str
The object representation string.
"""
root = self.__wd_paths.get("root")
path = self.path.relative_to(root) if root else self.path
return (
"InotifyEventTest("
f"wd={self.wd} "
f"path={path} "
f"name={self.name} "
f"mask={self.mask} "
f"flags={self.flags} "
f"cookie={self.cookie}"
")"
)
def match(self, **kwargs: Any) -> bool:
"""Test the object for matching criteria.
Parameters
----------
kwargs
Key-value pairs to be compared to the object's attributes.
Returns
-------
matching_status: bool
Indicates if the object meets the criteria.
Raises
------
ValueError
If a criterion key is not part of the object's attributes.
"""
for key, value in kwargs.items():
if hasattr(self, key):
if getattr(self, key) != value:
return False
else:
raise ValueError(f"Invalid criterion key: {key}")
return True
class InotifyTest:
"""A wrapper of the `inotify_simple.INotify`.
This wrapper is used to simplify some usage of the `inotify_simple.INotify`.
See Also
--------
inotify_simple.INotify
"""
def __init__(self, root: pathlib.Path | None = None) -> None:
"""Create the `inotify_simple.INotify` wrapper object.
Parameters
----------
root: pathlib.Path, optional
If this argument is provided the pretty print of the generated
events will use relative path to this root path.
"""
self.__inotify = inotify_simple.INotify()
self.__wd_paths: dict[int | str, pathlib.Path] = dict()
if root is not None:
self.__wd_paths["root"] = root
def add_watch(self, path: pathlib.Path) -> None:
"""Add a path to the watch list.
Parameters
----------
path: pathlib.Path
The path to add to the watch list.
"""
wd = self.__inotify.add_watch(path, inotify_simple.masks.ALL_EVENTS)
self.__wd_paths[wd] = path
def read_events(self) -> list[InotifyEventTest]:
"""Read the `inotify_simple.Event` and wrap them.
Returns
-------
events: list[InotifyEventTest]
The list of events that have been read.
"""
return [
InotifyEventTest(e, self.__wd_paths)
for e in self.__inotify.read(timeout=100, read_delay=100)
]
```
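A minimal usage sketch of the two test helpers above; it assumes a Linux host with inotify available and that the snippet runs in the same module as the helpers, and the path and flag values are illustrative.
```python
# Hypothetical usage of InotifyTest and InotifyEventTest.match().
import pathlib
import tempfile

root = pathlib.Path(tempfile.mkdtemp())
watcher = InotifyTest(root=root)
watcher.add_watch(root)

(root / "hello.txt").touch()

for event in watcher.read_events():
    print(event)
    if event.match(name="hello.txt", flags=["CREATE"]):
        print("creation event observed")
```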
#### File: inotify_watcher/tools/debug_helpers.py
```python
from __future__ import annotations
import functools
import inspect
import logging
from typing import Any
from typing import Callable
logger = logging.getLogger(__name__)
def log_function_name(function: Callable[..., Any]) -> Callable[..., Any]:
"""Allow to log the function name before each call.
This is a decorator function.
"""
@functools.wraps(function)
def log_and_call_function(*args: Any, **kwargs: Any) -> Any:
stack_level = len(
[x for x in inspect.stack() if x[3] != "log_and_call_function"]
)
logger.debug(f"{' ' * stack_level}-> {function.__qualname__}")
value = function(*args, **kwargs)
logger.debug(f"{' ' * stack_level}<- {function.__qualname__}")
return value
return log_and_call_function
``` |
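A short, hypothetical example of the decorator above, assuming it is used in the same module (so `logger` propagates to the root handler): nested calls are logged with an indentation that tracks the stack depth.
```python
# Example use of @log_function_name; output lines are indicative.
import logging

logging.basicConfig(level=logging.DEBUG)


@log_function_name
def inner() -> int:
    return 42


@log_function_name
def outer() -> int:
    return inner()


outer()
# ... -> outer
# ...  -> inner
# ...  <- inner
# ... <- outer
```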
{
"source": "jmlgomez73/Automatic-Notion-Backup",
"score": 2
} |
#### File: jmlgomez73/Automatic-Notion-Backup/notion_backup_via_zip.py
```python
import os
import sys
import json
import time
import urllib
import urllib.request
import shutil
from datetime import datetime
import zipfile
TIME_ZONE = "Europe/Paris"
NOTION_API = 'https://www.notion.so/api/v3'
data = {}
with open(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)),'config.json'))) as file:
data = json.load(file)
NOTION_TOKEN_V2 = data["NOTION_TOKEN_V2"]
NOTION_SPACE_ID = data["NOTION_SPACE_ID"]
EXPORT_FILENAME = "export.zip"
TARGET_PATH = data["TARGET_PATH"]
ENQUEUE_TASK_PARAM = {
"task": {
"eventName": "exportSpace", "request": {
"spaceId": NOTION_SPACE_ID,
"exportOptions": {"exportType": "markdown", "timeZone": TIME_ZONE, "locale": "en"}
}
}
}
def request(endpoint: str, params: object):
req = urllib.request.Request(
f'{NOTION_API}/{endpoint}',
data = json.dumps(params).encode('utf8'),
headers = {
'content-type': 'application/json',
'cookie': f'token_v2={NOTION_TOKEN_V2}; '
},
)
response = urllib.request.urlopen(req)
return json.loads(response.read().decode('utf8'))
def export():
try:
task_id = request('enqueueTask', ENQUEUE_TASK_PARAM).get('taskId')
except Exception as e:
with open("C:\\Users\\Shockz\\Desktop\\NotionError.txt","w") as f:
f.write(str(e))
quit()
print(f'Enqueued task {task_id}')
while True:
time.sleep(2)
tasks = request("getTasks", {"taskIds": [task_id]}).get('results')
task = next(t for t in tasks if t.get('id') == task_id)
print(f'\rPages exported: {task.get("status").get("pagesExported")}', end="")
if task.get('state') == 'success':
break
export_url = task.get('status').get('exportURL')
print(f'\nExport created, downloading: \n{export_url}')
urllib.request.urlretrieve(
export_url, os.path.join(TARGET_PATH, EXPORT_FILENAME),
reporthook = lambda c, bs, ts: print(f"\r{int(c * bs * 100 / ts)}%", end="")
)
print(f'\nDownload complete: {EXPORT_FILENAME}')
save()
def save():
today = datetime.today().strftime('%d-%m-%Y')
SAVE_DIR= TARGET_PATH
exported_file = os.path.join(SAVE_DIR, 'export.zip')
name = "notion_export-" + today
base_name = os.path.join(SAVE_DIR, "notion_export-")
today_path = os.path.join(base_name + today)
today_file = today_path + ".zip"
backups = [d for d in os.listdir(SAVE_DIR) if "notion_export" in d]
if not os.path.exists(today_file):
if len(backups) > 1:
os.remove(base_name + datetime.strftime(min([datetime.strptime(str(back.split('export-')[1].replace('.zip','')),'%d-%m-%Y') for back in backups]),'%d-%m-%Y') + '.zip')
if exported_file:
with zipfile.ZipFile(exported_file) as zip:
zip.extractall(today_path)
os.remove(exported_file)
"""
if not os.path.exists(today_path):
if len(backups) > 1:
shutil.rmtree(base_name + datetime.strftime(min([datetime.strptime(str(back.split('export-')[1]),'%d-%m-%Y') for back in backups]),'%d-%m-%Y'))
if exported_file:
with zipfile.ZipFile(exported_file) as zip:
zip.extractall(today_path)
os.remove(exported_file)
"""
removing_identifiers(today_path, today_file)
def removing_identifiers(today_path: str, today_file: str):
ids_files = []
for base, dirs, files in os.walk(today_path):
for file in files:
if file.endswith('.md'):
files_pattern = file.split(" ")[-1].split(".")[0]
ids_files.append(files_pattern)
ids_files = list(set(ids_files))
temp_dirs = os.walk(today_path, topdown=False)
for root, dirs, files in temp_dirs:
for i in dirs:
directory = os.path.join(root,i)
try:
os.rename(directory,directory.replace(directory.split(" ")[-1],""))
            except (FileExistsError, FileNotFoundError):  # log file
with open(os.path.join(TARGET_PATH, "log.txt"),'a', encoding="utf8") as f:
f.write(directory + '\n')
temp_file = os.walk(today_path, topdown=False)
for root, dirs, files in temp_file:
for file in files:
if file.endswith('.md'):
file_path = os.path.join(root,file)
new_path = file_path.replace(" "+file_path.split(" ")[-1].split(".")[0],"")
lines = []
with open(file_path,'r', encoding="utf8") as f:
lines = f.readlines()
rem = False
try:
with open(new_path, "w", encoding="utf8") as f:
for line in lines:
for idn in ids_files:
line = line.replace("%20"+idn,"")
f.write(line)
rem = True
                except Exception:
with open(os.path.join(TARGET_PATH, "log.txt"),'a', encoding="utf8") as f:
f.write(new_path + '\n')
rem = False
if rem and os.path.exists(new_path):
os.remove(file_path)
else:
with open(os.path.join(TARGET_PATH, "log.txt"),'a', encoding="utf8") as f:
f.write("FAILING" + new_path + '\n')
def zipdir(path, ziph):
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file),
os.path.relpath(os.path.join(root, file),
os.path.join(path, '..')))
with zipfile.ZipFile(f'{today_file}', 'w', zipfile.ZIP_DEFLATED) as zipf:
zipdir(f'{today_path}/', zipf)
shutil.rmtree(today_path)
if __name__ == "__main__":
export()
``` |
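The script reads three keys from a `config.json` placed next to it. A sketch of generating such a file is below; the values are placeholders, not real credentials.
```python
# Hypothetical generator for the config.json this script expects.
import json

config = {
    "NOTION_TOKEN_V2": "<token_v2 cookie value>",
    "NOTION_SPACE_ID": "<workspace UUID>",
    "TARGET_PATH": "C:\\Backups\\Notion",
}

with open("config.json", "w") as f:
    json.dump(config, f, indent=4)
```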
{
"source": "jmlgomez73/MyBackupDrive",
"score": 2
} |
#### File: MyBackupDrive/Sources/automatic.py
```python
from win10toast import ToastNotifier
import pathlib
from backup import recompile
from utils import check_space_availability
def show_toast_task(title, message, path_image):
toaster = ToastNotifier()
toaster.show_toast(title,
message,
duration=10, icon_path=path_image)
if __name__ == "__main__":
try:
working_directory = str(pathlib.Path(
__file__).parent.absolute().parent.absolute())+"\\Resources\\icon.ico"
if(check_space_availability() == False):
working_directory = str(pathlib.Path(
__file__).parent.absolute().parent.absolute())+"\\Resources\\error.ico"
show_toast_task("Problem with DriveMyFiles",
"There is not enough space in drive", working_directory)
else:
output = recompile()
show_toast_task(
"DriveMyFiles", "Backup successfully", working_directory)
except (OSError, IndexError, FileNotFoundError) as e:
working_directory = str(pathlib.Path(
__file__).parent.absolute().parent.absolute())+"\\Resources\\error.ico"
show_toast_task(" Error in DriveMyFiles",
"An error occurred in the automatic backup", working_directory)
```
#### File: MyBackupDrive/Sources/utils.py
```python
import os
import time
from logger_settings import logger
from Sources.json_handler import json_handler
from Sources.drive import get_size as cloud_size, get_files, del_backup
import shutil
"""Calculate the size of the files or the files of a directory
Returns: size of the files/dirs, number of files and number of folders
"""
def get_size():
json_data = json_handler()
lista = json_data.get_list("DIRECTORIES")
num_files = 0
num_dir = 0
total = 0
logger.info("Starting to calculate size of the files ...")
for ruta in lista:
if os.path.exists(ruta):
if os.path.isdir(ruta):
num_dir += 1
total += get_directory_size(ruta)
else:
num_files += 1
total += os.path.getsize(ruta)
logger.info("Calculated size ...")
return get_size_format(total), str(num_files), str(num_dir)
"""Calculate the size of the files of a directory"""
def get_directory_size(directory):
total = 0
try:
for entry in os.scandir(directory):
if entry.is_file():
total += entry.stat().st_size
elif entry.is_dir():
total += get_directory_size(entry.path)
except NotADirectoryError:
logger.error("The file is not a directory")
return os.path.getsize(directory)
except PermissionError:
logger.error("The user doesnt have permissions to access this file or directory")
return 0
return total
"""Translate bytes to a more expresive unit"""
def get_size_format(b,factor=1024):
logger.info("Transforming to a more expressive unit ...")
for unit in ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
if b < factor:
return f"{b:.2f} {unit}"
        b /= factor
    return f"{b:.2f} YB"
"""Sets the values of the local sizes in the json configuration file"""
def set_local_sizes():
json_data = json_handler()
size, num_files, num_folders = get_size()
json_data.write_field("SIZES", num_files, "LOCAL_FILES")
json_data.write_field("SIZES", num_folders, "LOCAL_FOLDERS")
json_data.write_field("SIZES", size, "LOCAL_SIZE")
"""Sets the values of the cloud sizes in the json configuration file"""
def set_cloud_sizes():
json_data = json_handler()
used, free, total, percent = cloud_size()
json_data.write_field("SIZES", used, "CLOUD_USED")
json_data.write_field("SIZES", free, "CLOUD_FREE")
json_data.write_field("SIZES", total, "CLOUD_TOTAL")
json_data.write_field("SIZES", percent, "CLOUD_PERCENT")
"""Check if there is enough space to add one more element."""
def check_space_availability():
json_data = json_handler()
local_size = get_size()[0].split(" ")
cloud_free = json_data.get_list("SIZES", "CLOUD_FREE").split(" ")
units = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]
if ((units.index(local_size[1]) >= units.index(cloud_free[1])) and (float(local_size[0]) >= (float(cloud_free[0])-0.3))):# Error margin of 300 MB
        logger.debug("%s %s %s %s" % (units.index(local_size[1]), units.index(cloud_free[1]), local_size[0], float(cloud_free[0]) - 0.3))
return False
else:
return True
"""Remove the oldest n local backups set by the user
"""
def local_cleaner():
import stat
json_data = json_handler()
if json_data.get_list("OPTIONS", "DELETE_BACKUP_LOCAL"):
dirs = os.listdir('Temp')
dates = [time.ctime(os.path.getctime('Temp\\'+file)) for file in dirs]
count = len(dates) - json_data.get_list("OPTIONS", "NUM_BACKUP_LOCAL")
if count > 0:
for file in dirs[0:count]:
path = 'Temp\\' + file
try:
if os.path.isdir(path):
                        shutil.rmtree(path, onerror=lambda func, path, _: (os.chmod(path, stat.S_IWRITE), func(path)))
else:
os.remove(path)
except OSError as e:
logger.error("Could not delete the directory - " + e.strerror)
logger.info("Complete cleaning of local backups")
"""Remove the oldest n cloud backups set by the user
"""
def cloud_cleaner():
json_data = json_handler()
if json_data.get_list("OPTIONS", "DELETE_BACKUP_CLOUD"):
list_backups = get_files(True)
if list_backups:
count = len(list_backups) - json_data.get_list("OPTIONS", "NUM_BACKUP_CLOUD")
if count > 0:
for bkup in list(list_backups.keys())[0:count]:
del_backup(list_backups[bkup])
logger.info("Complete cleaning of cloud backups")
``` |
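A quick check of `get_size_format` above: repeated division by `factor` walks up the unit ladder until the value drops below it.
```python
# Expected behaviour of get_size_format(), assuming it is importable here.
print(get_size_format(512))           # 512.00 B
print(get_size_format(2048))          # 2.00 KB
print(get_size_format(5 * 1024**3))   # 5.00 GB
```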
{
"source": "jml-happy/SegCaps_tf2",
"score": 3
} |
#### File: SegCaps_tf2/segcapsnet/capsnet.py
```python
from tensorflow.keras import layers, models
from tensorflow.keras import backend as K
K.set_image_data_format('channels_last')
from segcapsnet.capsule_layers import ConvCapsuleLayer, DeconvCapsuleLayer, Mask, Length
def CapsNetR3(input_shape, n_class=2, enable_decoder=True):
x = layers.Input(shape=input_shape) # x=keras_shape(None, 512, 512, 3)
# Layer 1: Just a conventional Conv2D layer
conv1 = layers.Conv2D(filters=16, kernel_size=5, strides=1, padding='same', activation='relu', name='conv1')(x)
# Reshape layer to be 1 capsule x [filters] atoms
_, H, W, C = conv1.get_shape() # _, 512, 512, 16
conv1_reshaped = layers.Reshape((H, W, 1, C))(conv1)
# Layer 1: Primary Capsule: Conv cap with routing 1
primary_caps = ConvCapsuleLayer(kernel_size=5, num_capsule=2, num_atoms=16, strides=2, padding='same',
routings=1, name='primarycaps')(conv1_reshaped)
# Layer 2: Convolutional Capsule
conv_cap_2_1 = ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=16, strides=1, padding='same',
routings=3, name='conv_cap_2_1')(primary_caps)
# Layer 2: Convolutional Capsule
conv_cap_2_2 = ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=32, strides=2, padding='same',
routings=3, name='conv_cap_2_2')(conv_cap_2_1)
# Layer 3: Convolutional Capsule
conv_cap_3_1 = ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=32, strides=1, padding='same',
routings=3, name='conv_cap_3_1')(conv_cap_2_2)
# Layer 3: Convolutional Capsule
conv_cap_3_2 = ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=64, strides=2, padding='same',
routings=3, name='conv_cap_3_2')(conv_cap_3_1)
# Layer 4: Convolutional Capsule
conv_cap_4_1 = ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=32, strides=1, padding='same',
routings=3, name='conv_cap_4_1')(conv_cap_3_2)
# Layer 1 Up: Deconvolutional Capsule
deconv_cap_1_1 = DeconvCapsuleLayer(kernel_size=4, num_capsule=8, num_atoms=32, upsamp_type='deconv',
scaling=2, padding='same', routings=3,
name='deconv_cap_1_1')(conv_cap_4_1)
# Skip connection
up_1 = layers.Concatenate(axis=-2, name='up_1')([deconv_cap_1_1, conv_cap_3_1])
# Layer 1 Up: Deconvolutional Capsule
deconv_cap_1_2 = ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=32, strides=1,
padding='same', routings=3, name='deconv_cap_1_2')(up_1)
# Layer 2 Up: Deconvolutional Capsule
deconv_cap_2_1 = DeconvCapsuleLayer(kernel_size=4, num_capsule=4, num_atoms=16, upsamp_type='deconv',
scaling=2, padding='same', routings=3,
name='deconv_cap_2_1')(deconv_cap_1_2)
# Skip connection
up_2 = layers.Concatenate(axis=-2, name='up_2')([deconv_cap_2_1, conv_cap_2_1])
# Layer 2 Up: Deconvolutional Capsule
deconv_cap_2_2 = ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=16, strides=1,
padding='same', routings=3, name='deconv_cap_2_2')(up_2)
# Layer 3 Up: Deconvolutional Capsule
deconv_cap_3_1 = DeconvCapsuleLayer(kernel_size=4, num_capsule=2, num_atoms=16, upsamp_type='deconv',
scaling=2, padding='same', routings=3,
name='deconv_cap_3_1')(deconv_cap_2_2)
# Skip connection
up_3 = layers.Concatenate(axis=-2, name='up_3')([deconv_cap_3_1, conv1_reshaped])
# Layer 4: Convolutional Capsule: 1x1
seg_caps = ConvCapsuleLayer(kernel_size=1, num_capsule=1, num_atoms=16, strides=1, padding='same',
routings=3, name='seg_caps')(up_3)
# Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
# Calculates euclidean norm via tf.norm
out_seg = Length(num_classes=n_class, seg=True, name='out_seg')(seg_caps)
# Decoder network.
_, H, W, C, A = seg_caps.get_shape() #(?, 512, 512, 1, 16)
y = layers.Input(shape=input_shape[:-1]+(1,)) #y: keras_shape(512, 512, 1)
    # Element-wise multiplication of the two inputs.
    masked_by_y = Mask()([seg_caps, y])  # The true label masks the capsule output. For training: (None, 512, 512, 1, 16)
    # Computes the euclidean norm manually (rather than via tf.norm) and one-hot encodes the longest capsule.
    masked = Mask()(seg_caps)  # Mask using the capsule with maximal length. For prediction.
def shared_decoder(mask_layer):
recon_remove_dim = layers.Reshape((H, W, A))(mask_layer) #mask_layer=(?, 512, 512, 1, 16)
recon_1 = layers.Conv2D(filters=64, kernel_size=1, padding='same', kernel_initializer='he_normal',
activation='relu', name='recon_1')(recon_remove_dim)
recon_2 = layers.Conv2D(filters=128, kernel_size=1, padding='same', kernel_initializer='he_normal',
activation='relu', name='recon_2')(recon_1)
out_recon = layers.Conv2D(filters=1, kernel_size=1, padding='same', kernel_initializer='he_normal',
activation='sigmoid', name='out_recon')(recon_2)
return out_recon
# Models for training and evaluation (prediction)
train_model = models.Model(inputs=[x, y], outputs=[out_seg, shared_decoder(masked_by_y)])
    if enable_decoder:
        eval_model = models.Model(inputs=x, outputs=[out_seg, shared_decoder(masked)])
    else:
        eval_model = models.Model(inputs=x, outputs=[out_seg])
# manipulate model
    noise = layers.Input(shape=(H, W, C, A))
noised_seg_caps = layers.Add()([seg_caps, noise])
masked_noised_y = Mask()([noised_seg_caps, y])
manipulate_model = models.Model(inputs=[x, y, noise], outputs=shared_decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
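# Usage sketch (illustrative only; assumes 512x512 RGB inputs, and that the
# actual compile/fit settings live elsewhere in this repo):
#
#   train_model, eval_model, manipulate_model = CapsNetR3((512, 512, 3))
#   # train_model takes [image, true_mask] inputs and outputs
#   # [segmentation, reconstruction], so it is compiled with two losses,
#   # e.g. a segmentation loss on 'out_seg' and a down-weighted
#   # reconstruction loss on 'out_recon'.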
def CapsNetR1(input_shape, n_class=2):
x = layers.Input(shape=input_shape)
# Layer 1: Just a conventional Conv2D layer
conv1 = layers.Conv2D(filters=16, kernel_size=5, strides=1, padding='same', activation='relu', name='conv1')(x)
# Reshape layer to be 1 capsule x [filters] atoms
_, H, W, C = conv1.get_shape()
conv1_reshaped = layers.Reshape((H, W, 1, C))(conv1)
# Layer 1: Primary Capsule: Conv cap with routing 1
primary_caps = ConvCapsuleLayer(kernel_size=5, num_capsule=2, num_atoms=16, strides=2, padding='same',
routings=1, name='primarycaps')(conv1_reshaped)
# Layer 2: Convolutional Capsule
conv_cap_2_1 = ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=16, strides=1, padding='same',
routings=1, name='conv_cap_2_1')(primary_caps)
# Layer 2: Convolutional Capsule
conv_cap_2_2 = ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=32, strides=2, padding='same',
routings=3, name='conv_cap_2_2')(conv_cap_2_1)
# Layer 3: Convolutional Capsule
conv_cap_3_1 = ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=32, strides=1, padding='same',
routings=1, name='conv_cap_3_1')(conv_cap_2_2)
# Layer 3: Convolutional Capsule
conv_cap_3_2 = ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=64, strides=2, padding='same',
routings=3, name='conv_cap_3_2')(conv_cap_3_1)
# Layer 4: Convolutional Capsule
conv_cap_4_1 = ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=32, strides=1, padding='same',
routings=1, name='conv_cap_4_1')(conv_cap_3_2)
# Layer 1 Up: Deconvolutional Capsule
deconv_cap_1_1 = DeconvCapsuleLayer(kernel_size=4, num_capsule=8, num_atoms=32, upsamp_type='deconv',
scaling=2, padding='same', routings=3,
name='deconv_cap_1_1')(conv_cap_4_1)
# Skip connection
up_1 = layers.Concatenate(axis=-2, name='up_1')([deconv_cap_1_1, conv_cap_3_1])
# Layer 1 Up: Deconvolutional Capsule
deconv_cap_1_2 = ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=32, strides=1,
padding='same', routings=1, name='deconv_cap_1_2')(up_1)
# Layer 2 Up: Deconvolutional Capsule
deconv_cap_2_1 = DeconvCapsuleLayer(kernel_size=4, num_capsule=4, num_atoms=16, upsamp_type='deconv',
scaling=2, padding='same', routings=3,
name='deconv_cap_2_1')(deconv_cap_1_2)
# Skip connection
up_2 = layers.Concatenate(axis=-2, name='up_2')([deconv_cap_2_1, conv_cap_2_1])
# Layer 2 Up: Deconvolutional Capsule
deconv_cap_2_2 = ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=16, strides=1,
padding='same', routings=1, name='deconv_cap_2_2')(up_2)
# Layer 3 Up: Deconvolutional Capsule
deconv_cap_3_1 = DeconvCapsuleLayer(kernel_size=4, num_capsule=2, num_atoms=16, upsamp_type='deconv',
scaling=2, padding='same', routings=3,
name='deconv_cap_3_1')(deconv_cap_2_2)
# Skip connection
up_3 = layers.Concatenate(axis=-2, name='up_3')([deconv_cap_3_1, conv1_reshaped])
# Layer 4: Convolutional Capsule: 1x1
seg_caps = ConvCapsuleLayer(kernel_size=1, num_capsule=1, num_atoms=16, strides=1, padding='same',
routings=1, name='seg_caps')(up_3)
# Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
out_seg = Length(num_classes=n_class, seg=True, name='out_seg')(seg_caps)
# Decoder network.
_, H, W, C, A = seg_caps.get_shape()
y = layers.Input(shape=input_shape[:-1]+(1,))
masked_by_y = Mask()([seg_caps, y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()(seg_caps) # Mask using the capsule with maximal length. For prediction
def shared_decoder(mask_layer):
recon_remove_dim = layers.Reshape((H, W, A))(mask_layer)
recon_1 = layers.Conv2D(filters=64, kernel_size=1, padding='same', kernel_initializer='he_normal',
activation='relu', name='recon_1')(recon_remove_dim)
recon_2 = layers.Conv2D(filters=128, kernel_size=1, padding='same', kernel_initializer='he_normal',
activation='relu', name='recon_2')(recon_1)
out_recon = layers.Conv2D(filters=1, kernel_size=1, padding='same', kernel_initializer='he_normal',
activation='sigmoid', name='out_recon')(recon_2)
return out_recon
# Models for training and evaluation (prediction)
train_model = models.Model(inputs=[x, y], outputs=[out_seg, shared_decoder(masked_by_y)])
eval_model = models.Model(inputs=x, outputs=[out_seg, shared_decoder(masked)])
# manipulate model
    noise = layers.Input(shape=(H, W, C, A))
noised_seg_caps = layers.Add()([seg_caps, noise])
masked_noised_y = Mask()([noised_seg_caps, y])
manipulate_model = models.Model(inputs=[x, y, noise], outputs=shared_decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
def CapsNetBasic(input_shape, n_class=2):
x = layers.Input(shape=input_shape)
# Layer 1: Just a conventional Conv2D layer
conv1 = layers.Conv2D(filters=256, kernel_size=5, strides=1, padding='same', activation='relu', name='conv1')(x)
# Reshape layer to be 1 capsule x [filters] atoms
_, H, W, C = conv1.get_shape()
conv1_reshaped = layers.Reshape((H, W, 1, C))(conv1)
# Layer 1: Primary Capsule: Conv cap with routing 1
primary_caps = ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=32, strides=1, padding='same',
routings=1, name='primarycaps')(conv1_reshaped)
# Layer 4: Convolutional Capsule: 1x1
seg_caps = ConvCapsuleLayer(kernel_size=1, num_capsule=1, num_atoms=16, strides=1, padding='same',
routings=3, name='seg_caps')(primary_caps)
# Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
out_seg = Length(num_classes=n_class, seg=True, name='out_seg')(seg_caps)
# Decoder network.
_, H, W, C, A = seg_caps.get_shape()
y = layers.Input(shape=input_shape[:-1]+(1,))
masked_by_y = Mask()([seg_caps, y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()(seg_caps) # Mask using the capsule with maximal length. For prediction
def shared_decoder(mask_layer):
recon_remove_dim = layers.Reshape((H, W, A))(mask_layer)
recon_1 = layers.Conv2D(filters=64, kernel_size=1, padding='same', kernel_initializer='he_normal',
activation='relu', name='recon_1')(recon_remove_dim)
recon_2 = layers.Conv2D(filters=128, kernel_size=1, padding='same', kernel_initializer='he_normal',
activation='relu', name='recon_2')(recon_1)
out_recon = layers.Conv2D(filters=1, kernel_size=1, padding='same', kernel_initializer='he_normal',
activation='sigmoid', name='out_recon')(recon_2)
return out_recon
# Models for training and evaluation (prediction)
train_model = models.Model(inputs=[x, y], outputs=[out_seg, shared_decoder(masked_by_y)])
eval_model = models.Model(inputs=x, outputs=[out_seg, shared_decoder(masked)])
# manipulate model
    noise = layers.Input(shape=(H, W, C, A))
noised_seg_caps = layers.Add()([seg_caps, noise])
masked_noised_y = Mask()([noised_seg_caps, y])
manipulate_model = models.Model(inputs=[x, y, noise], outputs=shared_decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
``` |
{
"source": "jmlinx/afterthought",
"score": 4
} |
#### File: afterthought/afterthought/trigger.py
```python
class Timer():
"""
Timer is applied to set the rebalance frequency of Backtestor.BySchedule.
It initialized a counter. Everytime when update_scheduler() is called, the
Timer will add one tothe counter. When is_trade_time() is called, if the
counter equals to the preset period, it will return True, and reset the
counter to zero.
Parameters:
-----------
period: int
time interval to return True
counter_init: int
initial value of counter. Default as period - 1, so the is_trade_time()
will immediately return True if update_scheduler() is call once.
"""
def __init__(self, period, counter_init=None):
self.period = period
self.counter_init = counter_init
self._init_counter()
    def _init_counter(self):
        # `is not None` so that an explicit counter_init of 0 is honored
        if self.counter_init is not None:
            self.counter = self.counter_init
        else:
            self.counter = self.period - 1
def update_scheduler(self, *args):
self._count_time()
def is_trade_time(self):
if self.counter == self.period:
self._reset_counter()
return True
else:
return False
def _count_time(self):
self.counter += 1
def _reset_counter(self):
self.counter = 0
def reset_scheduler(self):
self._init_counter()
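# Minimal usage sketch for Timer: with period=3 the default counter starts at
# 2, so the first update already triggers a trade.
#
#   timer = Timer(period=3)
#   timer.update_scheduler()   # counter: 2 -> 3
#   timer.is_trade_time()      # True, counter reset to 0
#   timer.update_scheduler()   # counter: 0 -> 1
#   timer.is_trade_time()      # False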
class Scheduler():
"""
Schedule the trading on certain dates.
Parameters
----------
schedule: list of time object
"""
def __init__(self, schedule):
self.schedule = schedule
self.iter_schedule = iter(self.schedule)
self.trade_time = next(self.iter_schedule)
def update_scheduler(self, time):
self.now = time
    def is_trade_time(self):
        if self.now >= self.trade_time:
            next_trade_time = next(self.iter_schedule, None)
            if next_trade_time is not None:
                self.trade_time = next_trade_time
            # print('Rebalance: ', self.now)
            return True
        return False
def reset_scheduler(self):
self.iter_schedule = iter(self.schedule)
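# Minimal usage sketch for Scheduler (hypothetical dates):
#
#   import datetime as dt
#   sched = Scheduler([dt.date(2020, 1, 31), dt.date(2020, 2, 28)])
#   sched.update_scheduler(dt.date(2020, 1, 15))
#   sched.is_trade_time()   # False: before the first scheduled date
#   sched.update_scheduler(dt.date(2020, 1, 31))
#   sched.is_trade_time()   # True, and advances to the next scheduled date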
``` |
{
"source": "jmlipman/MedicDeepLabv3Plus",
"score": 3
} |
#### File: lib/blocks/BasicBlocks.py
```python
import torch
from torch import nn
from torch.nn.functional import interpolate, softmax
class Interpolate(nn.Module):
def __init__(self):
"""
"""
super(Interpolate, self).__init__()
def forward(self, input, size=None, scale_factor=None, mode="nearest", align_corners=None):
return interpolate(input, size=size, scale_factor=scale_factor, mode=mode, align_corners=align_corners)
class Sigmoid(nn.Module):
def __init__(self):
"""
"""
super(Sigmoid, self).__init__()
def forward(self, input):
return torch.sigmoid(input)
class Softmax(nn.Module):
def __init__(self):
"""
"""
super(Softmax, self).__init__()
def forward(self, input, dim=None, _stacklevel=3, dtype=None):
return softmax(input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)
class Cat(nn.Module):
def __init__(self):
"""Concatenation
"""
super(Cat, self).__init__()
def forward(self, tensors, dim=1, out=None):
return torch.cat(tensors, dim=dim, out=out)
class Sum(nn.Module):
def __init__(self):
"""
"""
super(Sum, self).__init__()
def forward(self, input, dtype=None):
out = input[0] + input[1]
for i in range(2, len(input)):
out += input[i]
return out
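# These wrappers let purely functional ops live inside module containers and
# be called like layers. A hypothetical composition (tensors x and skip are
# placeholders):
#
#   up = Interpolate()
#   cat = Cat()
#   y = cat([up(x, scale_factor=2, mode="bilinear", align_corners=False), skip])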
```
#### File: lib/models/BaseModel.py
```python
import torch
from lib.utils import log, he_normal, removeSmallIslands, combineLabels
from lib.utils import softmax2onehot, sigmoid2onehot
import os, time, json
from torch import nn
from datetime import datetime
import numpy as np
from lib.metric import Metric
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
def initialize(self, device, output="", model_state=""):
"""Sets the device, output path, and loads model's parameters if needed
Args:
            `device`: Device where the computations will be performed, e.g. "cuda:0".
            `output`: Path where the output will be saved. If no output path is
                given, nothing will be saved; if the code tries to save
                something anyway, it will probably throw an error.
            `model_state`: Path from which to load stored parameters.
"""
# Bring the model to GPU
self.device = device
self.out_path = output
self.to(self.device)
# Load or initialize weights
if model_state != "":
print("Loading previous model")
self.load_state_dict(torch.load(model_state, map_location=self.device))
else:
def weight_init(m):
if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv3d):
he_normal(m.weight)
torch.nn.init.zeros_(m.bias)
self.apply(weight_init)
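    # Usage sketch (illustrative; MedicDeepLabv3Plus is a subclass defined in
    # this repo, and the paths are placeholders):
    #
    #   model = MedicDeepLabv3Plus(modalities=1, n_classes=3, first_filters=32)
    #   model.initialize(device="cuda:0", output="results/",
    #                    model_state="")  # "" initializes weights from scratch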
def fit(self, tr_loader, val_loader, epochs, val_interval,
loss, val_metrics, opt):
"""Trains the NN.
Args:
`tr_loader`: DataLoader with the training set.
            `val_loader`: DataLoader with the validation set.
`epochs`: Number of epochs to train the model. If 0, no train.
`val_interval`: After how many epochs to perform validation.
`loss`: Name of the loss function.
`val_metrics`: Which metrics to measure at validation time.
`opt`: Optimizer.
"""
t0 = time.time()
e = 1
# Expected classes of our dataset
measure_classes = {0: "background", 1: "contra", 2: "R_hemisphere"}
# Which classes will be reported during validation
measure_classes_mean = np.array([1, 2])
while e <= epochs:
self.train()
tr_loss = 0
for (tr_i), (X, Y, info, W) in enumerate(tr_loader):
X = [x.to(self.device) for x in X]
Y = [y.to(self.device) for y in Y]
W = [w.to(self.device) for w in W]
output = self(X)
pred = output[0]
tr_loss_tmp = loss(output, Y, W)
                tr_loss += tr_loss_tmp.item()  # .item() frees the graph held by the loss tensor
# Optimization
opt.zero_grad()
tr_loss_tmp.backward()
opt.step()
tr_loss /= len(tr_loader)
if len(val_loader) != 0 and e % val_interval == 0:
log("Validation", self.out_path)
self.eval()
val_loss = 0
# val_scores stores all needed metrics for assessing validation
val_scores = np.zeros((len(val_metrics), len(val_loader), len(measure_classes)))
Measure = Metric(val_metrics, onehot=softmax2onehot,
classes=measure_classes, multiprocess=False)
with torch.no_grad():
for (val_i), (X, Y, info, W) in enumerate(val_loader):
X = [x.to(self.device) for x in X]
Y = [y.to(self.device) for y in Y]
W = [w.to(self.device) for w in W]
output = self(X)
val_loss_tmp = loss(output, Y, W)
val_loss += val_loss_tmp
y_true_cpu = Y[0].cpu().numpy()
y_pred_cpu = output[0].cpu().numpy()
# Record all needed metrics
# If batch_size > 1, Measure.all() returns an avg.
tmp_res = Measure.all(y_pred_cpu, y_true_cpu)
for i, m in enumerate(val_metrics):
val_scores[i, val_i] = tmp_res[m]
# Validation loss
val_loss /= len(val_loader)
val_str = " Val Loss: {}".format(val_loss)
# val_metrics shape: num_metrics x num_batches x num_classes
for i, m in enumerate(val_metrics):
# tmp shape: num_classes (averaged over num_batches when val != -1)
tmp = np.array(Measure._getMean(val_scores[i]))
# Mean validation value in metric m (all interesting classes)
tmp_val = tmp[measure_classes_mean]
# Note: if tmp_val is NaN, it means that the classes I am
# interested in (check lib/data/whatever, measure_classes_mean)
# were not found in the validation set.
tmp_val = np.mean(tmp_val[tmp_val != -1])
val_str += ". Val " + m + ": " + str(tmp_val)
else:
val_str = ""
eta = " ETA: " + datetime.fromtimestamp(time.time() + (epochs-e)*(time.time()-t0)/e).strftime("%Y-%m-%d %H:%M:%S")
log("Epoch: {}. Loss: {}.".format(e, tr_loss) + val_str + eta, self.out_path)
# Save model after every epoch
torch.save(self.state_dict(), self.out_path + "model/MedicDeepLabv3Plus-model-" + str(e))
if e > 1 and os.path.exists(self.out_path + "model/MedicDeepLabv3Plus-model-"+str(e-1)):
os.remove(self.out_path + "model/MedicDeepLabv3Plus-model-" + str(e-1))
e += 1
def evaluate(self, test_loader, metrics, remove_islands, save_output=True):
"""Tests/Evaluates the NN.
Args:
`test_loader`: DataLoader containing the test set. Batch_size = 1.
`metrics`: Metrics to measure.
`save_output`: (bool) whether to save the output segmentations.
`remove_islands`: (bool) whether to apply post-processing.
"""
# Expected classes of our dataset
measure_classes = {0: "background", 1: "contra", 2: "R_hemisphere"}
results = {}
self.eval()
Measure = Metric(metrics, onehot=sigmoid2onehot,
classes=measure_classes,
multiprocess=True)
# Pool to store pieces of output that will be put together
# before evaluating the whole image.
        # This is useful when the entire image doesn't fit into memory.
with torch.no_grad():
for (test_i), (X, Y, info, W) in enumerate(test_loader):
print("{}/{}".format(test_i+1, len(test_loader)))
X = [x.to(self.device) for x in X]
Y = [y.to(self.device) for y in Y]
W = [w.to(self.device) for w in W]
id_ = info["id"][0]
output = self(X)
y_pred_cpu = output[0].cpu().numpy()
y_true_cpu = Y[0].cpu().numpy()
if remove_islands:
y_pred_cpu = removeSmallIslands(y_pred_cpu, thr=20)
# Predictions (and GT) separate the two hemispheres
# combineLabels will combine these such that it creates
# brainmask and contra-hemisphere ROIs instead of
# two different hemisphere ROIs.
y_pred_cpu = combineLabels(y_pred_cpu)
# If GT was provided it measures the performance
if len(y_true_cpu.shape) > 1:
y_true_cpu = combineLabels(y_true_cpu)
results[id_] = Measure.all(y_pred_cpu, y_true_cpu)
test_loader.dataset.save(y_pred_cpu[0], info,
self.out_path + id_)
# Gather results (multiprocessing)
for k in results:
results[k] = results[k].get()
if len(results) > 0:
with open(self.out_path + "stats.json", "w") as f:
f.write(json.dumps(results))
# If we are using multiprocessing we need to close the pool
Measure.close()
```
#### File: jmlipman/MedicDeepLabv3Plus/train.py
```python
import os, time, torch, json, argparse
import numpy as np
import nibabel as nib
from lib.utils import *
from lib.losses import Loss
from torch.utils.data import DataLoader
from datetime import datetime
from lib.models.MedicDeepLabv3Plus import MedicDeepLabv3Plus
from lib.data.DataWrapper import DataWrapper
def get_arguments():
"""Gets (and parses) the arguments from the command line.
Args:
`args`: If None, it takes the arguments from the command line.
Else, it will parse `args` (used for testing with sacred)
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
# Data
parser.add_argument("--input", type=str, required=True,
help="Directory with the data for optimizing MedicDeepLabv3+")
# Training
parser.add_argument("--epochs", type=int, default=300,
help="Epochs. If 0: only evaluate")
parser.add_argument("--batch_size", type=int, default=1,
help="Batch size")
parser.add_argument("--lr", type=float, default="1e-4",
help="Learning rate")
parser.add_argument("--wd", type=float, default="0",
help="Weight decay")
parser.add_argument("--filters", type=int, default=32,
help="Number of filters (fewer filters -> lower GPU requirements)")
# Validation
parser.add_argument("--validation", type=str, default="",
help="Directory with the data for validation")
parser.add_argument("--val_interval", type=int, default=1,
help="After how many epochs data is validated")
parser.add_argument("--val_metrics", type=str, default="dice",
help="List of metrics to measure during validation")
# Other
parser.add_argument("--output", type=str, required=True,
help="Output directory (if it doesn't exist, it will create it)")
parser.add_argument("--gpu", type=int, default=0, dest="device",
help="GPU Device. Write -1 if no GPU is available")
parser.add_argument("--model_state", type=str, default="",
help="File that contains the saved parameters of the model")
parsed = parser.parse_args()
# --input
if not os.path.isdir(parsed.input):
raise Exception("The input folder `" + parsed.input + "` does not exist")
# --output
if os.path.exists(parsed.output):
if os.path.isfile(parsed.output):
raise Exception("The provided path for the --output `" + parsed.output + "` corresponds to an existing file. Provide a non-existing path or a folder.")
elif os.path.isdir(parsed.output):
files = [int(f) for f in os.listdir(parsed.output) if f.isdigit()]
parsed.output = os.path.join(parsed.output, str(len(files)+1), "")
os.makedirs(parsed.output)
else:
raise Exception("The provided path for the --output `" + parsed.output + "` is invalid. Provide a non-existing path or a folder.")
else:
parsed.output = os.path.join(parsed.output, "1", "")
os.makedirs(parsed.output)
# --validation
if parsed.validation != "" and not os.path.isdir(parsed.validation):
raise Exception("The validaiton folder `" + parsed.validation + "` does not exist")
if parsed.validation == "":
print("> Note: No validation data was provided, so validation won't be done during MedicDeepLabv3+ optimization")
# --gpu
if parsed.device >= torch.cuda.device_count():
if torch.cuda.device_count() == 0:
print("> No available GPUs. Add --gpu -1 to not use GPU. NOTE: This may take FOREVER to run.")
else:
print("> Available GPUs:")
for i in range(torch.cuda.device_count()):
print(" > GPU #"+str(i)+" ("+torch.cuda.get_device_name(i)+")")
raise Exception("The GPU #"+str(parsed.device)+" does not exist. Check available GPUs.")
if parsed.device > -1:
parsed.device = "cuda:"+str(parsed.device)
else:
parsed.device = "cpu"
# Metrics to be evaluated during evaluation
allowed_metrics = ["dice", "HD", "compactness"]
# Metrics to be evaluated during validation
parsed.val_metrics = parsed.val_metrics.split(",")
for m in parsed.val_metrics:
if not m in allowed_metrics:
raise Exception("Wrong --val_metrics: "+str(m)+". Only allowed: "+str(allowed_metrics))
return parsed
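# Example invocation (paths are placeholders) matching the arguments parsed
# above:
#
#   python train.py --input data/train --validation data/val \
#       --output results --epochs 300 --gpu 0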
def main(args):
log("Start training MedicDeepLabv3+", args.output)
# Creates the folder where the models will be saved
os.makedirs(args.output + "model")
# Parameters required to initialize the model
model = MedicDeepLabv3Plus(modalities=1, n_classes=3, first_filters=args.filters)
model.initialize(device=args.device, output=args.output,
model_state=args.model_state)
# Dataloaders
tr_data = DataWrapper(args.input, "train")
val_data = DataWrapper(args.validation, "val")
if len(tr_data) > 0 and args.epochs > 0:
# DataLoaders
tr_loader = DataLoader(tr_data, batch_size=args.batch_size,
shuffle=True, pin_memory=False, num_workers=6)
if len(val_data) > 0:
val_loader = DataLoader(val_data, batch_size=args.batch_size,
shuffle=False, pin_memory=False, num_workers=6)
else:
val_loader = [] # So that len(val_loader) = 0
# Loss function
loss = Loss("CrossEntropyDiceLoss_multiple") # Deep supervision
# Optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
weight_decay=args.wd)
# Train the model
model.fit(tr_loader=tr_loader, val_loader=val_loader,
epochs=args.epochs, val_interval=args.val_interval,
loss=loss, val_metrics=args.val_metrics, opt=optimizer)
log("End", args.output)
if __name__ == "__main__":
# Get command-line arguments
args = get_arguments()
# Train MedicDeepLabv3+
main(args)
``` |