Dataset columns (dtype and observed value ranges):

column | dtype | values / range
---|---|---
repo_name | string | lengths 5 to 92
path | string | lengths 4 to 232
copies | string | 19 classes
size | string | lengths 4 to 7
content | string | lengths 721 to 1.04M
license | string | 15 classes
hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean | float64 | 6.51 to 99.9
line_max | int64 | 15 to 997
alpha_frac | float64 | 0.25 to 0.97
autogenerated | bool | 1 class
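A minimal sketch of how rows with this schema could be iterated, assuming the dump comes from a Hugging Face `datasets`-style source; the dataset identifier used below is hypothetical:

```python
# Hedged sketch: stream rows of a code dataset with the schema above.
# Assumptions: the `datasets` library is installed, and "user/python-code-dump"
# is a hypothetical identifier standing in for the real source.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train", streaming=True)
for row in ds:
    # Each row carries the raw file text ("content") plus per-file metadata.
    print(row["repo_name"], row["path"], row["license"], len(row["content"]))
    break
```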
TeamSWAP/swap | external/pyinstaller/tests/runtests.py | copies: 1 | size: 24562

#! /usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# This program will execute any file with a name matching test*<digit>.py. If
# your test needs an additional dependency, name it test*<digit><letter>.py so
# that it is ignored by this program but still recognizable to anyone as a
# dependency of that particular test.
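# For example (hypothetical names): a script called test_spam1.py would be
# collected and executed by this runner, while test_spam1a.py would be skipped
# and only serve as a helper/dependency of that test.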
import glob
import optparse
import os
import re
import shutil
import sys
# Expand PYTHONPATH with PyInstaller package to support running without
# installation.
pyi_home = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
sys.path.insert(0, pyi_home)
from PyInstaller import HOMEPATH
from PyInstaller import compat, configure
from PyInstaller import main as pyi_main
from PyInstaller.compat import is_py25, is_py26, is_win, is_darwin
from PyInstaller.lib import unittest2 as unittest
from PyInstaller.lib import junitxml
from PyInstaller.utils import misc
VERBOSE = False
REPORT = False
PYI_CONFIG = {}
# Directory with this script (runtests.py).
BASEDIR = os.path.dirname(os.path.abspath(__file__))
class MiscDependencies(object):
"""
    Placeholder for special requirements of some tests,
    e.g. basic/test_ctypes needs a C compiler.
    Every method returns None when successful, or a string containing an
    error message to be displayed on the console.
"""
def c_compiler(self):
"""
Check availability of C compiler.
"""
compiler = None
msg = 'Cannot find GCC, MinGW or Visual Studio in PATH.'
if is_win:
# Try MSVC.
compiler = misc.find_executable('cl')
if compiler is None:
# Try GCC.
compiler = misc.find_executable('gcc')
if compiler is None:
return msg
return None # C compiler was found.
class SkipChecker(object):
"""
    Check conditions to decide whether a test case should be skipped.
"""
def __init__(self):
depend = MiscDependencies()
# Required Python or OS version for some tests.
self.MIN_VERSION_OR_OS = {
'basic/test_celementtree': is_py25,
'basic/test_email': is_py25,
# On Mac DYLD_LIBRARY_PATH is not used.
'basic/test_absolute_ld_library_path': not is_win and not is_darwin,
'import/test_c_extension': is_py25,
'import/test_onefile_c_extension': is_py25,
'import/test_onefile_relative_import': is_py25,
'import/test_onefile_relative_import2': is_py26,
'import/test_onefile_relative_import3': is_py25,
'libraries/test_enchant': is_win,
# docutils, a sphinx dependency, fails in
# docutils.utils.__init__.py, function decode_path, where
# sys.getfilesystemencoding() returns None when frozen.
# Docutils doesn't expect this and throws an assertion.
# Untested on Mac, but this shouldn't be a problem, since
# Macs return 'utf-8'.
'libraries/test_sphinx': is_win or is_darwin,
}
# Required Python modules for some tests.
self.MODULES = {
'basic/test_codecs': ['codecs'],
'basic/test_module_attributes': ['xml.etree.cElementTree'],
'basic/test_multiprocess': ['multiprocessing'],
'basic/test_onefile_ctypes': ['ctypes'],
'basic/test_onefile_multiprocess': ['multiprocessing'],
'basic/test_onefile_nestedlaunch1': ['ctypes'],
'basic/test_onefile_win32com': ['win32com'],
'basic/test_pkg_structures': ['pkg_resources'],
'libraries/test_enchant': ['enchant'],
'libraries/test_Image': ['PIL'],
'libraries/test_Image2': ['PIL'],
'libraries/test_numpy': ['numpy'],
'libraries/test_onefile_matplotlib': ['matplotlib'],
'libraries/test_onefile_tkinter': ['Tkinter'],
'libraries/test_PIL': ['PIL'],
'libraries/test_PIL2': ['PIL'],
'libraries/test_pycrypto': ['Crypto'],
'libraries/test_pyodbc': ['pyodbc'],
'libraries/test_pyttsx': ['pyttsx'],
'libraries/test_pytz': ['pytz'],
'libraries/test_scipy': ['numpy', 'scipy'],
'libraries/test_sqlalchemy': ['sqlalchemy', 'MySQLdb', 'psycopg2'],
'libraries/test_twisted_qt4reactor': ['twisted', 'PyQt4'],
'libraries/test_twisted_reactor': ['twisted'],
'libraries/test_usb': ['ctypes', 'usb'],
'libraries/test_wx': ['wx'],
'libraries/test_wx_pubsub': ['wx'],
'libraries/test_wx_pubsub_arg1': ['wx'],
'libraries/test_wx_pubsub_kwargs': ['wx'],
'libraries/test_sphinx': ['sphinx', 'docutils', 'jinja2', 'uuid'],
'import/test_c_extension': ['simplejson'],
'import/test_ctypes_cdll_c': ['ctypes'],
'import/test_eggs2': ['pkg_resources'],
'import/test_onefile_c_extension': ['simplejson'],
'import/test_onefile_ctypes_cdll_c': ['ctypes'],
'import/test_onefile_zipimport': ['pkg_resources'],
'import/test_onefile_zipimport2': ['pkg_resources', 'setuptools'],
'import/test_pep302_import_protokol': ['sqlite3'],
'interactive/test_pygame': ['pygame'],
'interactive/test_pyqt4_multiprocessing': ['multiprocessing', 'PyQt4'],
}
        # Other dependencies of some tests.
self.DEPENDENCIES = {
'basic/test_onefile_ctypes': [depend.c_compiler()],
# Support for unzipped eggs is not yet implemented.
# http://www.pyinstaller.org/ticket/541
'import/test_eggs1': ['Unzipped eggs not yet implemented.'],
}
def _check_python_and_os(self, test_name):
"""
        Return True if the test is not listed or its required Python or OS
        version is met, False otherwise.
"""
if (test_name in self.MIN_VERSION_OR_OS and
not self.MIN_VERSION_OR_OS[test_name]):
return False
return True
def _check_modules(self, test_name):
"""
Return name of missing required module, if any. None means
no module is missing.
"""
if test_name in self.MODULES:
for mod_name in self.MODULES[test_name]:
# STDOUT and STDERR are discarded (devnull) to hide
# import exceptions.
trash = open(os.devnull)
retcode = compat.exec_python_rc('-c', "import %s" % mod_name,
stdout=trash, stderr=trash)
trash.close()
if retcode != 0:
return mod_name
return None
def _check_dependencies(self, test_name):
"""
Return error message when a requirement is not met, None otherwise.
"""
if test_name in self.DEPENDENCIES:
for dep in self.DEPENDENCIES[test_name]:
if dep is not None:
return dep
return None
def check(self, test_name):
"""
        Check test requirements, if any are specified.
        Return a tuple (True/False, 'Reason for skipping.').
        True means all requirements are met and the test case may
        be executed.
"""
if not self._check_python_and_os(test_name):
return (False, 'Required another Python version or OS.')
required_module = self._check_modules(test_name)
if required_module is not None:
return (False, "Module %s is missing." % required_module)
dependency = self._check_dependencies(test_name)
if dependency is not None:
return (False, dependency)
return (True, 'Requirements met.')
SPEC_FILE = set([
'basic/test_option_wignore',
'basic/test_threading2',
'basic/test_onefile_ctypes',
'basic/test_onefile_pkg_resources',
'basic/test_pkg_structures',
'import/test_app_with_plugins',
'import/test_eggs2',
'import/test_hiddenimport',
'interactive/test_matplotlib', # TODO .spec for this test contain win32 specific manifest code. Do we still need it?
'libraries/test_Image',
'libraries/test_PIL',
'multipackage/test_multipackage1',
'multipackage/test_multipackage2',
'multipackage/test_multipackage3',
'multipackage/test_multipackage4',
'multipackage/test_multipackage5',
])
class BuildTestRunner(object):
def __init__(self, test_name, verbose=False, report=False):
        # Use path separator '/' even on Windows for the test_name.
self.test_name = test_name.replace('\\', '/')
self.verbose = verbose
self.test_dir, self.test_file = os.path.split(self.test_name)
# For junit xml report some behavior is changed.
# Especially redirecting sys.stdout.
self.report = report
def _msg(self, text):
"""
Important text. Print it to console only in verbose mode.
"""
if self.verbose:
            # This allows redirecting stdout to the junit xml report.
sys.stdout.write('\n' + 10 * '#' + ' ' + text + ' ' + 10 * '#' + '\n\n')
sys.stdout.flush()
def _plain_msg(self, text):
"""
Print text to console only in verbose mode.
"""
if self.verbose:
sys.stdout.write(text + '\n')
sys.stdout.flush()
def _find_exepath(self, test, parent_dir='dist'):
of_prog = os.path.join(parent_dir, test) # one-file deploy filename
od_prog = os.path.join(parent_dir, test, test) # one-dir deploy filename
prog = None
if os.path.isfile(of_prog):
prog = of_prog
elif os.path.isfile(of_prog + ".exe"):
prog = of_prog + ".exe"
elif os.path.isdir(of_prog):
if os.path.isfile(od_prog):
prog = od_prog
elif os.path.isfile(od_prog + ".exe"):
prog = od_prog + ".exe"
return prog
def _run_created_exe(self, test, testdir=None):
"""
Run executable created by PyInstaller.
"""
self._msg('EXECUTING TEST ' + self.test_name)
# Run the test in a clean environment to make sure they're
# really self-contained
path = compat.getenv('PATH')
compat.unsetenv('PATH')
prog = self._find_exepath(test, 'dist')
if prog is None:
self._plain_msg('ERROR: no file generated by PyInstaller found!')
compat.setenv("PATH", path)
return 1
else:
self._plain_msg("RUNNING: " + prog)
old_wd = os.getcwd()
os.chdir(os.path.dirname(prog))
prog = os.path.join(os.curdir, os.path.basename(prog))
retcode, out, err = compat.exec_command_all(prog)
os.chdir(old_wd)
self._msg('STDOUT %s' % self.test_name)
self._plain_msg(out)
self._msg('STDERR %s' % self.test_name)
self._plain_msg(err)
compat.setenv("PATH", path)
return retcode
def test_exists(self):
"""
Return True if test file exists.
"""
return os.path.exists(os.path.join(BASEDIR, self.test_name + '.py'))
def test_building(self):
"""
        Run the build of the test script.
        Return True if the build succeeded, False otherwise.
"""
OPTS = ['--debug', '--noupx', '--specpath', os.getcwd(), '--distpath',
os.path.join(os.getcwd(), 'dist'), '--workpath',
os.path.join(os.getcwd(), 'build')]
if self.verbose:
OPTS.extend(['--debug', '--log-level=INFO'])
else:
OPTS.append('--log-level=ERROR')
# Build executable in onefile mode.
if self.test_file.startswith('test_onefile'):
OPTS.append('--onefile')
else:
OPTS.append('--onedir')
self._msg("BUILDING TEST " + self.test_name)
# Use pyinstaller.py for building test_name.
testfile_spec = self.test_file + '.spec'
if not os.path.exists(self.test_file + '.spec'):
# .spec file does not exist and it has to be generated
# for main script.
testfile_spec = self.test_file + '.py'
#pyinst_script = os.path.join(HOMEPATH, 'pyinstaller.py')
# TODO Fix redirecting stdout/stderr
        # In report mode, stdout and sys.stderr are redirected.
#if self.report:
## Write output from subprocess to stdout/err.
#retcode, out, err = compat.exec_python_all(pyinst_script,
#testfile_spec, *OPTS)
#sys.stdout.write(out)
#sys.stdout.write(err)
#else:
#retcode = compat.exec_python_rc(pyinst_script,
#testfile_spec, *OPTS)
pyi_args = [testfile_spec] + OPTS
        # TODO fix return code when running PyInstaller programmatically
pyi_main.run(pyi_args, PYI_CONFIG)
retcode = 0
return retcode == 0
def test_exe(self):
"""
Test running of all created executables.
"""
files = glob.glob(os.path.join('dist', self.test_file + '*'))
files.sort()
retcode = 0
for exe in files:
exe = os.path.splitext(exe)[0]
retcode_tmp = self._run_created_exe(exe[5:], self.test_dir)
retcode = retcode or retcode_tmp
return retcode == 0
def test_logs(self):
"""
Compare log files (now used only by multipackage test_name).
        Return True if .toc files match or when .toc patterns
are not defined.
"""
logsfn = glob.glob(self.test_file + '.toc')
        # Other main scripts do not start with 'test_'.
logsfn += glob.glob(self.test_file.split('_', 1)[1] + '_?.toc')
for logfn in logsfn:
self._msg("EXECUTING MATCHING " + logfn)
tmpname = os.path.splitext(logfn)[0]
prog = self._find_exepath(tmpname)
if prog is None:
prog = self._find_exepath(tmpname,
os.path.join('dist', self.test_file))
fname_list = compat.exec_python(
os.path.join(HOMEPATH, 'utils', 'archive_viewer.py'),
'-b', '-r', prog)
# Fix line-endings so eval() does not fail.
fname_list = fname_list.replace('\r\n', '\n').replace('\n\r', '\n')
fname_list = eval(fname_list)
pattern_list = eval(open(logfn, 'rU').read())
# Alphabetical order of patterns.
pattern_list.sort()
count = 0
for pattern in pattern_list:
found = False
for fname in fname_list:
if re.match(pattern, fname):
count += 1
found = True
self._plain_msg('MATCH: %s --> %s' % (pattern, fname))
break
if not found:
self._plain_msg('MISSING: %s' % pattern)
# Not all modules matched.
# Stop comparing other .toc files and fail the test.
if count < len(pattern_list):
return False
return True
class GenericTestCase(unittest.TestCase):
def __init__(self, test_dir, func_name):
"""
test_dir Directory containing testing python scripts.
func_name Name of test function to create.
"""
self.test_name = test_dir + '/' + func_name
        # Create new test function. This has to be done before super().
setattr(self, func_name, self._generic_test_function)
super(GenericTestCase, self).__init__(func_name)
        # For tests, the current working directory has to be changed temporarily.
self.curr_workdir = os.getcwdu()
def setUp(self):
testdir = os.path.dirname(self.test_name)
os.chdir(os.path.join(BASEDIR, testdir)) # go to testdir
        # For some 'basic' tests we need to create a file with the path to the
        # python executable and whether it is running in debug mode.
build_python = open(os.path.join(BASEDIR, 'basic', 'python_exe.build'),
'w')
build_python.write(sys.executable + "\n")
build_python.write('debug=%s' % __debug__ + '\n')
        # On Windows we need to preserve the system PATH for subprocesses in tests.
build_python.write(os.environ.get('PATH') + '\n')
build_python.close()
def tearDown(self):
os.chdir(self.curr_workdir) # go back from testdir
def _generic_test_function(self):
        # Skip the test case if test requirements are not met.
s = SkipChecker()
req_met, msg = s.check(self.test_name)
if not req_met:
raise unittest.SkipTest(msg)
# Create a build and test it.
b = BuildTestRunner(self.test_name, verbose=VERBOSE, report=REPORT)
self.assertTrue(b.test_exists(),
msg='Test %s not found.' % self.test_name)
self.assertTrue(b.test_building(),
msg='Build of %s failed.' % self.test_name)
self.assertTrue(b.test_exe(),
msg='Running exe of %s failed.' % self.test_name)
self.assertTrue(b.test_logs(),
msg='Matching .toc of %s failed.' % self.test_name)
class BasicTestCase(GenericTestCase):
test_dir = 'basic'
def __init__(self, func_name):
super(BasicTestCase, self).__init__(self.test_dir, func_name)
class ImportTestCase(GenericTestCase):
test_dir = 'import'
def __init__(self, func_name):
super(ImportTestCase, self).__init__(self.test_dir, func_name)
class LibrariesTestCase(GenericTestCase):
test_dir = 'libraries'
def __init__(self, func_name):
super(LibrariesTestCase, self).__init__(self.test_dir, func_name)
class MultipackageTestCase(GenericTestCase):
test_dir = 'multipackage'
def __init__(self, func_name):
super(MultipackageTestCase, self).__init__(self.test_dir, func_name)
class InteractiveTestCase(GenericTestCase):
"""
    Interactive tests require user interaction, mostly with a GUI.
    They have to be run directly by a user and can't be run by any
    continuous integration system.
"""
test_dir = 'interactive'
def __init__(self, func_name):
super(InteractiveTestCase, self).__init__(self.test_dir, func_name)
class TestCaseGenerator(object):
"""
Generate test cases.
"""
def _detect_tests(self, directory):
files = glob.glob(os.path.join(directory, 'test_*.py'))
# Test name is a file name without extension.
tests = [os.path.splitext(os.path.basename(x))[0] for x in files]
tests.sort()
return tests
def create_suite(self, test_types):
"""
Create test suite and add test cases to it.
test_types Test classes to create test cases from.
Return test suite with tests.
"""
suite = unittest.TestSuite()
for _type in test_types:
tests = self._detect_tests(_type.test_dir)
# Create test cases for a specific type.
for test_name in tests:
suite.addTest(_type(test_name))
return suite
def clean():
"""
Remove temporary files created while running tests.
"""
# Files/globs to clean up.
patterns = """python_exe.build
logdict*.log
disttest*
buildtest*
warn*.txt
*.py[co]
*/*.py[co]
*/*/*.py[co]
build/
dist/
*/*.dll
*/*.lib
*/*.obj
*/*.exp
*/*.so
*/*.dylib
""".split()
    # In some directories we do not need to clean files,
    # e.g. for unit tests.
IGNORE_DIRS = set([
'eggs4testing',
'unit',
])
# Remove temporary files in all subdirectories.
for directory in os.listdir(BASEDIR):
if not os.path.isdir(directory):
continue
if directory in IGNORE_DIRS:
continue
for pattern in patterns:
file_list = glob.glob(os.path.join(directory, pattern))
for pth in file_list:
try:
if os.path.isdir(pth):
shutil.rmtree(pth)
else:
os.remove(pth)
except OSError, e:
print e
# Delete *.spec files for tests without spec file.
for pth in glob.glob(os.path.join(directory, '*.spec')):
test_name = directory + '/' + os.path.splitext(os.path.basename(pth))[0]
if not test_name in SPEC_FILE:
if os.path.exists(pth):
os.remove(pth)
def run_tests(test_suite, xml_file):
"""
Run test suite and save output to junit xml file if requested.
"""
if xml_file:
        print 'Writing test results to: %s' % xml_file
        fp = open(xml_file, 'w')
result = junitxml.JUnitXmlResult(fp)
# Text from stdout/stderr should be added to failed test cases.
result.buffer = True
result.startTestRun()
test_suite.run(result)
result.stopTestRun()
fp.close()
else:
unittest.TextTestRunner(verbosity=2).run(test_suite)
def main():
try:
parser = optparse.OptionParser(usage='%prog [options] [TEST-NAME ...]',
epilog='TEST-NAME can be the name of the .py-file, '
'the .spec-file or only the basename.')
except TypeError:
parser = optparse.OptionParser(usage='%prog [options] [TEST-NAME ...]')
parser.add_option('-c', '--clean', action='store_true',
help='Clean up generated files')
parser.add_option('-i', '--interactive-tests', action='store_true',
help='Run interactive tests (default: run normal tests)')
parser.add_option('-v', '--verbose',
action='store_true',
default=False,
help='Verbose mode (default: %default)')
parser.add_option('--junitxml', action='store', default=None,
metavar='FILE', help='Create junit-xml style test report file')
opts, args = parser.parse_args()
# Do only cleanup.
if opts.clean:
clean()
raise SystemExit() # Exit code is 0 in this case.
# Run only specified tests.
if args:
if opts.interactive_tests:
parser.error('Must not specify -i/--interactive-tests when passing test names.')
suite = unittest.TestSuite()
for arg in args:
test_list = glob.glob(arg)
if not test_list:
test_list = [arg]
else:
test_list = [x for x in test_list if os.path.splitext(x)[1] == ".py"]
            # Sort tests alphabetically. For example test
# basic/test_nested_launch1 depends on basic/test_nested_launch0.
# Otherwise it would fail.
test_list.sort()
for t in test_list:
test_dir = os.path.dirname(t)
test_script = os.path.basename(os.path.splitext(t)[0])
suite.addTest(GenericTestCase(test_dir, test_script))
print 'Running test: %s' % (test_dir + '/' + test_script)
# Run all tests or all interactive tests.
else:
if opts.interactive_tests:
print 'Running interactive tests...'
test_classes = [InteractiveTestCase]
else:
print 'Running normal tests (-i for interactive tests)...'
test_classes = [BasicTestCase, ImportTestCase,
LibrariesTestCase, MultipackageTestCase]
# Create test suite.
generator = TestCaseGenerator()
suite = generator.create_suite(test_classes)
# Set global options
global VERBOSE, REPORT, PYI_CONFIG
VERBOSE = opts.verbose
REPORT = opts.junitxml is not None
PYI_CONFIG = configure.get_config(upx_dir=None) # Run configure phase only once.
# Run created test suite.
clean()
run_tests(suite, opts.junitxml)
if __name__ == '__main__':
main()
license: apache-2.0 | hash: 3,188,461,673,179,419,000 | line_mean: 34.961933 | line_max: 121 | alpha_frac: 0.56852 | autogenerated: false
NNTin/Reply-Dota-2-Reddit | displayreddit/drmatch.py | copies: 1 | size: 5099

from steamapi.getproplayerlist import proPlayerDictionary
from steamapi.getheroes import heroDictionary
from steamapi.getgameitems import itemDictionary
from converter import timeconverter, playerconverter
def displayResult(matchJson, playerSummariesJson):
introTemplate = '####	\n#####	 ' \
'Hover to view match ID: {matchid} [DB](http://www.dotabuff.com/matches/{matchid})/' \
'[OD](https://www.opendota.com/matches/{matchid})/' \
'[STRATZ](https://stratz.com/match/{matchid})' \
'\n######	\n\n' \
'[**{teamwinner} wins {winnerkills}-{loserkills} @ {time}**](#lumbdi "{additionalinformation}")\n\n'
tableTemplate = 'Lvl | Hero | Player| K/D/A | LH/D | XPM | GPM | HD | HH | TD\n' \
':--|:--:|:--|:--|:--|:--|:--|:--|:--|:--\n'
tableLineTemplate = '{level}|{hero}|{account}|{kda}|{lhd}|{xpm}|{gpm}|{hd}|{hh}|{td}\n'
dividerTemplate = '{level}||↑Radiant↑ ↓Dire↓ |{kda}|{lhd}|{xpm}|{gpm}|{hd}|{hh}|{td}\n'
outtro = '\n\n---\n\n'
#print(introTemplate + tableTemplate + tableLineTemplate + outtroTemplate)
matchID = matchJson['result']["match_id"]
if matchJson['result']['radiant_win']:
teamwinner = 'Radiant'
winnerkills = matchJson['result']["radiant_score"]
loserkills = matchJson['result']["dire_score"]
else:
teamwinner = 'Dire'
winnerkills = matchJson['result']["dire_score"]
loserkills = matchJson['result']["radiant_score"]
time = timeconverter.durationTimeConverter(matchJson['result']["duration"])
#TODO: Provide additional information if match is tournament
matchDate = timeconverter.unixTimeConverter(matchJson['result']["start_time"])
firstBloodTime = timeconverter.durationTimeConverter(matchJson['result']["first_blood_time"])
additionalInformation = 'Match ID: %s, match date: %s, first blood time: %s' %(matchID,matchDate,firstBloodTime)
intro = introTemplate.format(matchid=matchID, teamwinner=teamwinner, winnerkills=winnerkills, loserkills=loserkills, time=time, additionalinformation=additionalInformation)
radiantTable = ''
direTable = ''
teamStats = [{'level': 0, 'kills': 0, 'deaths': 0, 'assists': 0, 'lasthits': 0, 'denies': 0,
'xpm': 0, 'gpm': 0, 'hd': 0, 'hh': 0, 'td': 0
},
{'level': 0, 'kills': 0, 'deaths': 0, 'assists': 0, 'lasthits': 0, 'denies': 0,
'xpm': 0, 'gpm': 0, 'hd': 0, 'hh': 0, 'td': 0
}]
for player in matchJson['result']['players']:
#level, hero, player, kda, lh d, xpm, gpm, hd, hh, td
stats = {'level': player['level'], 'kills': player['kills'], 'deaths': player['deaths'],
'assists': player['assists'], 'lasthits': player['last_hits'],
'denies': player['denies'], 'xpm': player["xp_per_min"], 'gpm': player["gold_per_min"],
'hd': player["hero_damage"], 'hh': player["hero_healing"], 'td': player["tower_damage"]}
hero = '[](/hero-%s)' %heroDictionary[player['hero_id']]
account = playerconverter.playerConverter(player['account_id'], playerSummariesJson)
kda = '%s/%s/%s' %(stats['kills'],stats['deaths'], stats['assists'])
lhd = '%s/%s' %(player['last_hits'], player['denies'])
if player['player_slot'] < 127: #<127 -> Radiant
radiantTable += tableLineTemplate.format(level=stats['level'], hero=hero, account=account, kda=kda, lhd=lhd, xpm=stats['xpm'],
gpm=stats['gpm'], hd=stats['hd'], hh=stats['hh'], td=stats['td'])
for stat in stats:
teamStats[0][stat] += stats[stat]
else:
direTable += tableLineTemplate.format(level=stats['level'], hero=hero, account=account, kda=kda, lhd=lhd, xpm=stats['xpm'],
gpm=stats['gpm'], hd=stats['hd'], hh=stats['hh'], td=stats['td'])
for stat in stats:
teamStats[1][stat] += stats[stat]
teamStatsDict = {}
for i in range(0, len(teamStats)):
teamStats[i]['kda'] = '%s/%s/%s' %(teamStats[i]['kills'], teamStats[i]['deaths'], teamStats[i]['assists'])
teamStats[i]['lhd'] = '%s/%s' %(teamStats[i]['lasthits'], teamStats[i]['denies'])
teamStats[i].pop('kills')
teamStats[i].pop('deaths')
teamStats[i].pop('assists')
teamStats[i].pop('lasthits')
teamStats[i].pop('denies')
for stat in teamStats[i]:
teamStatsDict[stat] = teamStatsDict.get(stat, '') + ' ' + str(teamStats[i][stat])
divider = dividerTemplate.format(level=teamStatsDict['level'], kda=teamStatsDict['kda'], lhd=teamStatsDict['lhd'], xpm=teamStatsDict['xpm'],
gpm=teamStatsDict['gpm'], hd=teamStatsDict['hd'], hh=teamStatsDict['hh'], td=teamStatsDict['td'])
table = tableTemplate + radiantTable + divider + direTable
    return intro + table + '\n\n---\n\n'

license: mit | hash: -7,995,903,472,055,855,000 | line_mean: 54.956044 | line_max: 176 | alpha_frac: 0.574543 | autogenerated: false
UMDWeather/TheDisplayer | plugins/current/weather_underground.py | copies: 1 | size: 1027

import urllib, json
import datetime as dt
import logging
log = logging.getLogger(__name__)
################################################################################
## REQUIRED parameters:
################################################################################
## data_url - e.g. "http://api.wunderground.com/api/3a5b82718926c103/conditions/q/MD/College_Park.json"
################################################################################
dataFreq = dt.timedelta(minutes=5)
iconpath='http://icons.wxbug.com/i/c/j/##.gif'
def readData():
'''buffer the data so that we don't do reads too often'''
## is it time to get new data?
ctime = dt.datetime.now()
if ctime - readData.lastRead > dataFreq:
log.debug('downloading new weather data')
readData.lastRead = ctime
response = urllib.urlopen(data_url)
readData.data = json.loads(response.read())
return readData.data
readData.data = None
readData.lastRead = dt.datetime.now() - dt.timedelta(days=3)
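# Usage sketch (hypothetical API key): the module expects a global `data_url`,
# as described in the header comment, to be defined before readData() is used:
#     data_url = 'http://api.wunderground.com/api/<KEY>/conditions/q/MD/College_Park.json'
#     conditions = readData()  # repeat calls within 5 minutes reuse the buffer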
license: gpl-2.0 | hash: 941,463,521,244,889,000 | line_mean: 32.129032 | line_max: 103 | alpha_frac: 0.523856 | autogenerated: false
kkaushik24/python-design-patterns | structural/bridge_pattern.py | copies: 1 | size: 1404

from abc import ABCMeta, abstractmethod
class DrawingApi:
def draw_circle(self, x, y, radius):
pass
class DrawingApi1(DrawingApi):
def draw_circle(self, x, y, radius):
print "Api1 ", x, y, radius
class DrawingApi2(DrawingApi):
def draw_circle(self, x, y, radius):
        print "Api2 ", x, y, radius
class Shape:
__metaclass__ = ABCMeta
def __init__(self, drawing_api):
self.drawing_api = drawing_api
@abstractmethod
def draw(self):
pass
@abstractmethod
    def resize_by_percentage(self, pct):
pass
class CircleShape(Shape):
def __init__(self, x, y, radius, drawing_api):
super(CircleShape, self).__init__(drawing_api)
self.x = x
self.y = y
self.radius = radius
def draw(self):
self.drawing_api.draw_circle(self.x, self.y, self.radius)
def resize_by_percentage(self, pct):
self.radius = self.radius + (self.radius * pct / 100)
return self.radius
if __name__ == '__main__':
drawing_api1 = DrawingApi1()
drawing_api2 = DrawingApi2()
circle_shape1 = CircleShape(1, 2, 4, drawing_api1)
circle_shape2 = CircleShape(4, 8, 12, drawing_api2)
circle_shape1.draw()
print 'resized circle1 radius', circle_shape1.resize_by_percentage(40)
circle_shape2.draw()
print 'resized circle2 radius', circle_shape2.resize_by_percentage(50)
license: apache-2.0 | hash: 922,079,697,343,518,000 | line_mean: 22.79661 | line_max: 74 | alpha_frac: 0.625356 | autogenerated: false
jaeilepp/mne-python | mne/coreg.py | copies: 1 | size: 42970

"""Coregistration between different coordinate frames."""
# Authors: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
from .externals.six.moves import configparser
from .externals.six import string_types
import fnmatch
from glob import glob, iglob
import os
import stat
import sys
import re
import shutil
from warnings import warn
from functools import reduce
import numpy as np
from numpy import dot
from .io import read_fiducials, write_fiducials, read_info
from .io.constants import FIFF
from .label import read_label, Label
from .source_space import (add_source_space_distances, read_source_spaces,
write_source_spaces)
from .surface import read_surface, write_surface, _normalize_vectors
from .bem import read_bem_surfaces, write_bem_surfaces
from .transforms import rotation, rotation3d, scaling, translation, Transform
from .utils import get_config, get_subjects_dir, logger, pformat
from .viz._3d import _fiducial_coords
from .externals.six.moves import zip
# some path templates
trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')
subject_dirname = os.path.join('{subjects_dir}', '{subject}')
bem_dirname = os.path.join(subject_dirname, 'bem')
surf_dirname = os.path.join(subject_dirname, 'surf')
bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif")
head_bem_fname = pformat(bem_fname, name='head')
fid_fname = pformat(bem_fname, name='fiducials')
fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
_head_fnames = (head_bem_fname, pformat(bem_fname, name='head-medium'))
_high_res_head_fnames = (os.path.join(bem_dirname, '{subject}-head-dense.fif'),
os.path.join(surf_dirname, 'lh.seghead'),
os.path.join(surf_dirname, 'lh.smseghead'))
def _make_writable(fname):
"""Make a file writable."""
os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128) # write
def _make_writable_recursive(path):
"""Recursively set writable."""
if sys.platform.startswith('win'):
return # can't safely set perms
for root, dirs, files in os.walk(path, topdown=False):
for f in dirs + files:
_make_writable(os.path.join(root, f))
def _find_head_bem(subject, subjects_dir, high_res=False):
"""Find a high resolution head."""
# XXX this should be refactored with mne.surface.get_head_surf ...
fnames = _high_res_head_fnames if high_res else _head_fnames
for fname in fnames:
path = fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
return path
def coregister_fiducials(info, fiducials, tol=0.01):
"""Create a head-MRI transform by aligning 3 fiducial points.
Parameters
----------
info : Info
Measurement info object with fiducials in head coordinate space.
fiducials : str | list of dict
Fiducials in MRI coordinate space (either path to a ``*-fiducials.fif``
file or list of fiducials as returned by :func:`read_fiducials`.
Returns
-------
trans : Transform
The device-MRI transform.
"""
if isinstance(info, string_types):
info = read_info(info)
if isinstance(fiducials, string_types):
fiducials, coord_frame_to = read_fiducials(fiducials)
else:
coord_frame_to = FIFF.FIFFV_COORD_MRI
frames_from = {d['coord_frame'] for d in info['dig']}
if len(frames_from) > 1:
raise ValueError("info contains fiducials from different coordinate "
"frames")
else:
coord_frame_from = frames_from.pop()
coords_from = _fiducial_coords(info['dig'])
coords_to = _fiducial_coords(fiducials, coord_frame_to)
trans = fit_matched_points(coords_from, coords_to, tol=tol)
return Transform(coord_frame_from, coord_frame_to, trans)
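# Usage sketch for coregister_fiducials() (file names are hypothetical):
#     info = read_info('sample_raw.fif')
#     trans = coregister_fiducials(info, 'sample-fiducials.fif')
# The returned Transform maps the digitized fiducials' frame (typically head)
# to MRI coordinates.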
def create_default_subject(fs_home=None, update=False,
subjects_dir=None):
"""Create an average brain subject for subjects without structural MRI.
Create a copy of fsaverage from the Freesurfer directory in subjects_dir
and add auxiliary files from the mne package.
Parameters
----------
fs_home : None | str
The freesurfer home directory (only needed if FREESURFER_HOME is not
specified as environment variable).
update : bool
In cases where a copy of the fsaverage brain already exists in the
        subjects_dir, this option allows copying only files that don't already
exist in the fsaverage directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(os.environ['SUBJECTS_DIR']) as destination for the new subject.
Notes
-----
When no structural MRI is available for a subject, an average brain can be
substituted. Freesurfer comes with such an average brain model, and MNE
comes with some auxiliary files which make coregistration easier (see
:ref:`CACGEAFI`). :py:func:`create_default_subject` copies the relevant
files from Freesurfer into the current subjects_dir, and also adds the
auxiliary files provided by MNE.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if fs_home is None:
fs_home = get_config('FREESURFER_HOME', fs_home)
if fs_home is None:
raise ValueError(
"FREESURFER_HOME environment variable not found. Please "
"specify the fs_home parameter in your call to "
"create_default_subject().")
# make sure freesurfer files exist
fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
if not os.path.exists(fs_src):
raise IOError('fsaverage not found at %r. Is fs_home specified '
'correctly?' % fs_src)
for name in ('label', 'mri', 'surf'):
dirname = os.path.join(fs_src, name)
if not os.path.isdir(dirname):
raise IOError("Freesurfer fsaverage seems to be incomplete: No "
"directory named %s found in %s" % (name, fs_src))
# make sure destination does not already exist
dest = os.path.join(subjects_dir, 'fsaverage')
if dest == fs_src:
raise IOError(
"Your subjects_dir points to the freesurfer subjects_dir (%r). "
"The default subject can not be created in the freesurfer "
"installation directory; please specify a different "
"subjects_dir." % subjects_dir)
elif (not update) and os.path.exists(dest):
raise IOError(
"Can not create fsaverage because %r already exists in "
"subjects_dir %r. Delete or rename the existing fsaverage "
"subject folder." % ('fsaverage', subjects_dir))
# copy fsaverage from freesurfer
logger.info("Copying fsaverage subject from freesurfer directory...")
if (not update) or not os.path.exists(dest):
shutil.copytree(fs_src, dest)
_make_writable_recursive(dest)
# copy files from mne
source_fname = os.path.join(os.path.dirname(__file__), 'data', 'fsaverage',
'fsaverage-%s.fif')
dest_bem = os.path.join(dest, 'bem')
if not os.path.exists(dest_bem):
os.mkdir(dest_bem)
logger.info("Copying auxiliary fsaverage files from mne...")
dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')
_make_writable_recursive(dest_bem)
for name in ('fiducials', 'head', 'inner_skull-bem', 'trans'):
if not os.path.exists(dest_fname % name):
shutil.copy(source_fname % name, dest_bem)
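# Usage sketch for create_default_subject() (paths are hypothetical): copy
# Freesurfer's fsaverage plus MNE's auxiliary files into a subjects directory:
#     create_default_subject(fs_home='/usr/local/freesurfer',
#                            subjects_dir='/data/subjects')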
def _decimate_points(pts, res=10):
"""Decimate the number of points using a voxel grid.
Create a voxel grid with a specified resolution and retain at most one
point per voxel. For each voxel, the point closest to its center is
retained.
Parameters
----------
pts : array, shape (n_points, 3)
The points making up the head shape.
res : scalar
The resolution of the voxel space (side length of each voxel).
Returns
-------
pts : array, shape = (n_points, 3)
The decimated points.
"""
from scipy.spatial.distance import cdist
pts = np.asarray(pts)
# find the bin edges for the voxel space
xmin, ymin, zmin = pts.min(0) - res / 2.
xmax, ymax, zmax = pts.max(0) + res
xax = np.arange(xmin, xmax, res)
yax = np.arange(ymin, ymax, res)
zax = np.arange(zmin, zmax, res)
# find voxels containing one or more point
H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False)
# for each voxel, select one point
X, Y, Z = pts.T
out = np.empty((np.sum(H > 0), 3))
for i, (xbin, ybin, zbin) in enumerate(zip(*np.nonzero(H))):
x = xax[xbin]
y = yax[ybin]
z = zax[zbin]
xi = np.logical_and(X >= x, X < x + res)
yi = np.logical_and(Y >= y, Y < y + res)
zi = np.logical_and(Z >= z, Z < z + res)
idx = np.logical_and(zi, np.logical_and(yi, xi))
ipts = pts[idx]
mid = np.array([x, y, z]) + res / 2.
dist = cdist(ipts, [mid])
i_min = np.argmin(dist)
ipt = ipts[i_min]
out[i] = ipt
return out
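# Usage sketch for _decimate_points(): with res=10 (same units as the points),
# at most one point per 10 x 10 x 10 voxel is kept, e.g. for a hypothetical
# (n, 3) array of head-shape points in mm:
#     sparse_pts = _decimate_points(dense_pts, res=10)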
def _trans_from_params(param_info, params):
"""Convert transformation parameters into a transformation matrix.
Parameters
----------
param_info : tuple, len = 3
Tuple describing the parameters in x (do_translate, do_rotate,
do_scale).
params : tuple
The transformation parameters.
Returns
-------
trans : array, shape = (4, 4)
Transformation matrix.
"""
do_rotate, do_translate, do_scale = param_info
i = 0
trans = []
if do_rotate:
x, y, z = params[:3]
trans.append(rotation(x, y, z))
i += 3
if do_translate:
x, y, z = params[i:i + 3]
trans.insert(0, translation(x, y, z))
i += 3
if do_scale == 1:
s = params[i]
trans.append(scaling(s, s, s))
elif do_scale == 3:
x, y, z = params[i:i + 3]
trans.append(scaling(x, y, z))
trans = reduce(dot, trans)
return trans
def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
scale=False, tol=None, x0=None, out='trans'):
"""Find a transform between matched sets of points.
This minimizes the squared distance between two matching sets of points.
Uses :func:`scipy.optimize.leastsq` to find a transformation involving
a combination of rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (n, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : bool
Number of scaling parameters. With False, points are not scaled. With
True, points are scaled by the same factor along all axes.
tol : scalar | None
The error tolerance. If the distance between any of the matched points
exceeds this value in the solution, a RuntimeError is raised. With
None, no error check is performed.
x0 : None | tuple
Initial values for the fit parameters.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
One of the following, depending on the ``out`` parameter:
trans : array, shape = (4, 4)
Transformation that, if applied to src_pts, minimizes the squared
distance to tgt_pts.
params : array, shape = (n_params, )
A single tuple containing the translation, rotation and scaling
parameters in that order.
"""
from scipy.optimize import leastsq
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
if src_pts.shape != tgt_pts.shape:
raise ValueError("src_pts and tgt_pts must have same shape (got "
"{0}, {1})".format(src_pts.shape, tgt_pts.shape))
rotate = bool(rotate)
translate = bool(translate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0)
elif param_info == (True, False, 1):
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0, 1)
elif param_info == (True, True, 0):
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0)
elif param_info == (True, True, 1):
def error(x):
rx, ry, rz, tx, ty, tz, s = x
trans = reduce(dot, (translation(tx, ty, tz), rotation(rx, ry, rz),
scaling(s, s, s)))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0, 1)
else:
raise NotImplementedError(
"The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
x, _, _, _, _ = leastsq(error, x0, full_output=True)
# re-create the final transformation matrix
if (tol is not None) or (out == 'trans'):
trans = _trans_from_params(param_info, x)
# assess the error of the solution
if tol is not None:
if not translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
est_pts = dot(src_pts, trans.T)[:, :3]
err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))
if np.any(err > tol):
raise RuntimeError("Error exceeds tolerance. Error = %r" % err)
if out == 'params':
return x
elif out == 'trans':
return trans
else:
raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
def _point_cloud_error(src_pts, tgt_pts):
"""Find the distance from each source point to its closest target point.
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_pts : array, shape = (m, 3)
Target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
from scipy.spatial.distance import cdist
Y = cdist(src_pts, tgt_pts, 'euclidean')
dist = Y.min(axis=1)
return dist
def _point_cloud_error_balltree(src_pts, tgt_tree):
"""Find the distance from each source point to its closest target point.
Uses sklearn.neighbors.BallTree for greater efficiency
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_tree : sklearn.neighbors.BallTree
BallTree of the target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
dist, _ = tgt_tree.query(src_pts)
return dist.ravel()
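# Usage sketches for the two fitting helpers in this module (hypothetical
# data). fit_matched_points() above recovers a transform between matched
# point sets, e.g. a pure translation:
#     src = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
#     tgt = src + [0.01, 0.02, 0.03]
#     trans = fit_matched_points(src, tgt, out='trans')   # (4, 4) matrix
# fit_point_cloud() below fits unmatched points, e.g. rotation-only:
#     params = fit_point_cloud(src_pts, tgt_pts, rotate=True,
#                              translate=False, scale=0)  # returns (rx, ry, rz)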
def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
scale=0, x0=None, leastsq_args={}, out='params'):
"""Find a transform between unmatched sets of points.
This minimizes the squared distance from each source point to its closest
target point, using :func:`scipy.optimize.leastsq` to find a
transformation using rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (m, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : 0 | 1 | 3
Number of scaling parameters. With 0, points are not scaled. With 1,
points are scaled by the same factor along all axes. With 3, points are
scaled by a separate factor along each axis.
x0 : None | tuple
Initial values for the fit parameters.
leastsq_args : dict
Additional parameters to submit to :func:`scipy.optimize.leastsq`.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
x : array, shape = (n_params, )
Estimated parameters for the transformation.
Notes
-----
Assumes that the target points form a dense enough point cloud so that
the distance of each src_pt to the closest tgt_pt can be used as an
estimate of the distance of src_pt to tgt_pts.
"""
from scipy.optimize import leastsq
kwargs = {'epsfcn': 0.01}
kwargs.update(leastsq_args)
# assert correct argument types
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
translate = bool(translate)
rotate = bool(rotate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
try:
from sklearn.neighbors import BallTree
tgt_pts = BallTree(tgt_pts)
errfunc = _point_cloud_error_balltree
except ImportError:
warn("Sklearn could not be imported. Fitting points will be slower. "
"To improve performance, install the sklearn module.")
errfunc = _point_cloud_error
# for efficiency, define parameter specific error function
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
x0 = x0 or (0, 0, 0)
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 1):
x0 = x0 or (0, 0, 0, 1)
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 3):
x0 = x0 or (0, 0, 0, 1, 1, 1)
def error(x):
rx, ry, rz, sx, sy, sz = x
trans = rotation3d(rx, ry, rz) * [sx, sy, sz]
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, True, 0):
x0 = x0 or (0, 0, 0, 0, 0, 0)
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
err = errfunc(est[:, :3], tgt_pts)
return err
else:
raise NotImplementedError(
"The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
est, _, info, msg, _ = leastsq(error, x0, full_output=True, **kwargs)
logger.debug("fit_point_cloud leastsq (%i calls) info: %s", info['nfev'],
msg)
if out == 'params':
return est
elif out == 'trans':
return _trans_from_params(param_info, est)
else:
raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
"""Find paths to label files in a subject's label directory.
Parameters
----------
subject : str
Name of the mri subject.
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "aparc/*.label" will find all labels
in the "subject/label/aparc" directory). With None, find all labels.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
-------
paths : list
List of paths relative to the subject's label directory
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_dir = os.path.join(subjects_dir, subject)
lbl_dir = os.path.join(subject_dir, 'label')
if pattern is None:
paths = []
for dirpath, _, filenames in os.walk(lbl_dir):
rel_dir = os.path.relpath(dirpath, lbl_dir)
for filename in fnmatch.filter(filenames, '*.label'):
path = os.path.join(rel_dir, filename)
paths.append(path)
else:
paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]
return paths
def _find_mri_paths(subject, skip_fiducials, subjects_dir):
"""Find all files of an mri relevant for source transformation.
Parameters
----------
subject : str
Name of the mri subject.
skip_fiducials : bool
Do not scale the MRI fiducials. If False, an IOError will be raised
if no fiducials file can be found.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
-------
paths : dict
Dictionary whose keys are relevant file type names (str), and whose
values are lists of paths.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = {}
# directories to create
paths['dirs'] = [bem_dirname, surf_dirname]
# surf/ files
paths['surf'] = surf = []
surf_fname = os.path.join(surf_dirname, '{name}')
surf_names = ('inflated', 'sphere', 'sphere.reg', 'white', 'orig',
'orig_avg', 'inflated_avg', 'inflated_pre', 'pial',
'pial_avg', 'smoothwm', 'white_avg', 'sphere.reg.avg')
if os.getenv('_MNE_FEW_SURFACES', '') == 'true': # for testing
surf_names = surf_names[:4]
for surf_name in surf_names:
for hemi in ('lh.', 'rh.'):
name = hemi + surf_name
path = surf_fname.format(subjects_dir=subjects_dir,
subject=subject, name=name)
if os.path.exists(path):
surf.append(pformat(surf_fname, name=name))
# BEM files
paths['bem'] = bem = []
path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
bem.append('head')
bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,
subject=subject, name='*-bem')
re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,
name='(.+)').replace('\\', '\\\\')
for path in iglob(bem_pattern):
match = re.match(re_pattern, path)
name = match.group(1)
bem.append(name)
# fiducials
if skip_fiducials:
paths['fid'] = []
else:
paths['fid'] = _find_fiducials_files(subject, subjects_dir)
# check that we found at least one
if len(paths['fid']) == 0:
raise IOError("No fiducials file found for %s. The fiducials "
"file should be named "
"{subject}/bem/{subject}-fiducials.fif. In "
"order to scale an MRI without fiducials set "
"skip_fiducials=True." % subject)
# duplicate curvature files
paths['duplicate'] = dup = []
path = os.path.join(surf_dirname, '{name}')
for name in ['lh.curv', 'rh.curv']:
fname = pformat(path, name=name)
dup.append(fname)
# check presence of required files
for ftype in ['surf', 'duplicate']:
for fname in paths[ftype]:
path = fname.format(subjects_dir=subjects_dir, subject=subject)
path = os.path.realpath(path)
if not os.path.exists(path):
raise IOError("Required file not found: %r" % path)
# find source space files
paths['src'] = src = []
bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject)
fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif')
prefix = subject + '-'
for fname in fnames:
if fname.startswith(prefix):
fname = "{subject}-%s" % fname[len(prefix):]
path = os.path.join(bem_dirname, fname)
src.append(path)
return paths
def _find_fiducials_files(subject, subjects_dir):
"""Find fiducial files."""
fid = []
# standard fiducials
if os.path.exists(fid_fname.format(subjects_dir=subjects_dir,
subject=subject)):
fid.append(fid_fname)
# fiducials with subject name
pattern = pformat(fid_fname_general, subjects_dir=subjects_dir,
subject=subject, head='*')
regex = pformat(fid_fname_general, subjects_dir=subjects_dir,
subject=subject, head='(.+)').replace('\\', '\\\\')
for path in iglob(pattern):
match = re.match(regex, path)
head = match.group(1).replace(subject, '{subject}')
fid.append(pformat(fid_fname_general, head=head))
return fid
def _is_mri_subject(subject, subjects_dir=None):
"""Check whether a directory in subjects_dir is an mri subject directory.
Parameters
----------
subject : str
Name of the potential subject/directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
is_mri_subject : bool
Whether ``subject`` is an mri subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
return bool(_find_head_bem(subject, subjects_dir) or
_find_head_bem(subject, subjects_dir, high_res=True))
def _is_scaled_mri_subject(subject, subjects_dir=None):
"""Check whether a directory in subjects_dir is a scaled mri subject.
Parameters
----------
subject : str
Name of the potential subject/directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
is_scaled_mri_subject : bool
Whether ``subject`` is a scaled mri subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if not _is_mri_subject(subject, subjects_dir):
return False
fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
return os.path.exists(fname)
def _mri_subject_has_bem(subject, subjects_dir=None):
"""Check whether an mri subject has a file matching the bem pattern.
Parameters
----------
subject : str
Name of the subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
has_bem_file : bool
Whether ``subject`` has a bem file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
name='*-bem')
fnames = glob(pattern)
return bool(len(fnames))
def read_mri_cfg(subject, subjects_dir=None):
"""Read information from the cfg file of a scaled MRI brain.
Parameters
----------
subject : str
Name of the scaled MRI subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
cfg : dict
Dictionary with entries from the MRI's cfg file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
if not os.path.exists(fname):
raise IOError("%r does not seem to be a scaled mri subject: %r does "
"not exist." % (subject, fname))
logger.info("Reading MRI cfg file %s" % fname)
config = configparser.RawConfigParser()
config.read(fname)
n_params = config.getint("MRI Scaling", 'n_params')
if n_params == 1:
scale = config.getfloat("MRI Scaling", 'scale')
elif n_params == 3:
scale_str = config.get("MRI Scaling", 'scale')
scale = np.array([float(s) for s in scale_str.split()])
else:
raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
out = {'subject_from': config.get("MRI Scaling", 'subject_from'),
'n_params': n_params, 'scale': scale}
return out
def _write_mri_config(fname, subject_from, subject_to, scale):
"""Write the cfg file describing a scaled MRI subject.
Parameters
----------
fname : str
Target file.
subject_from : str
Name of the source MRI subject.
subject_to : str
Name of the scaled MRI subject.
scale : float | array_like, shape = (3,)
The scaling parameter.
"""
scale = np.asarray(scale)
if np.isscalar(scale) or scale.shape == ():
n_params = 1
else:
n_params = 3
config = configparser.RawConfigParser()
config.add_section("MRI Scaling")
config.set("MRI Scaling", 'subject_from', subject_from)
config.set("MRI Scaling", 'subject_to', subject_to)
config.set("MRI Scaling", 'n_params', str(n_params))
if n_params == 1:
config.set("MRI Scaling", 'scale', str(scale))
else:
config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
config.set("MRI Scaling", 'version', '1')
with open(fname, 'w') as fid:
config.write(fid)
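# The "MRI scaling parameters.cfg" file written above looks roughly like this
# (uniform scaling; subject names are hypothetical):
#     [MRI Scaling]
#     subject_from = fsaverage
#     subject_to = fsaverage_scaled
#     n_params = 1
#     scale = 0.95
#     version = 1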
def _scale_params(subject_to, subject_from, scale, subjects_dir):
"""Assemble parameters for scaling.
Returns
-------
subjects_dir : str
Subjects directory.
subject_from : str
Name of the source subject.
scale : array
Scaling factor, either shape=() for uniform scaling or shape=(3,) for
non-uniform scaling.
nn_scale : None | array
Scaling factor for surface normal. If scaling is uniform, normals are
unchanged and nn_scale is None. If scaling is non-uniform nn_scale is
an array of shape (3,).
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if (subject_from is None) != (scale is None):
raise TypeError("Need to provide either both subject_from and scale "
"parameters, or neither.")
if subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
subject_from = cfg['subject_from']
n_params = cfg['n_params']
scale = cfg['scale']
else:
scale = np.asarray(scale)
if scale.ndim == 0:
n_params = 1
elif scale.shape == (3,):
n_params = 3
else:
            raise ValueError("Invalid shape for scale parameter. Need scalar "
"or array of length 3. Got %s." % str(scale))
# prepare scaling parameter for normals
if n_params == 1:
nn_scale = None
elif n_params == 3:
nn_scale = 1. / scale
else:
raise RuntimeError("Invalid n_params value: %s" % repr(n_params))
return subjects_dir, subject_from, scale, nn_scale
def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
subjects_dir=None):
"""Scale a bem file.
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
bem_name : str
Name of the bem file. For example, to scale
``fsaverage-inner_skull-bem.fif``, the bem_name would be
"inner_skull-bem".
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
Scaling factor. Has to be specified if subjects_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
subjects_dir, subject_from, scale, nn_scale = \
_scale_params(subject_to, subject_from, scale, subjects_dir)
src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,
name=bem_name)
dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,
name=bem_name)
if os.path.exists(dst):
        raise IOError("File already exists: %s" % dst)
surfs = read_bem_surfaces(src)
for surf in surfs:
surf['rr'] *= scale
if nn_scale is not None:
assert len(surf['nn']) > 0
surf['nn'] *= nn_scale
_normalize_vectors(surf['nn'])
write_bem_surfaces(dst, surfs)
def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
scale=None, subjects_dir=None):
r"""Scale labels to match a brain that was previously created by scaling.
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination brain).
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "lh.BA3a.label" will scale
"fsaverage/label/lh.BA3a.label"; "aparc/\*.label" will find all labels
in the "fsaverage/label/aparc" directory). With None, scale all labels.
overwrite : bool
Overwrite any label file that already exists for subject_to (otherwise
        existing labels are skipped).
subject_from : None | str
Name of the original MRI subject (the brain that was scaled to create
subject_to). If None, the value is read from subject_to's cfg file.
scale : None | float | array_like, shape = (3,)
Scaling parameter. If None, the value is read from subject_to's cfg
file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
# read parameters from cfg
if scale is None or subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
if subject_from is None:
subject_from = cfg['subject_from']
if scale is None:
scale = cfg['scale']
# find labels
paths = _find_label_paths(subject_from, pattern, subjects_dir)
if not paths:
return
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_root = os.path.join(subjects_dir, subject_from, 'label')
dst_root = os.path.join(subjects_dir, subject_to, 'label')
# scale labels
for fname in paths:
dst = os.path.join(dst_root, fname)
if not overwrite and os.path.exists(dst):
continue
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
src = os.path.join(src_root, fname)
l_old = read_label(src)
pos = l_old.pos * scale
l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi,
l_old.comment, subject=subject_to)
l_new.save(dst)
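# Editor's illustration (hypothetical names and scale value; assumes the scaled
# subject already exists): scale every label under fsaverage/label/aparc for it.
#
#     scale_labels('fsaverage_scaled', pattern='aparc/*.label',
#                  subject_from='fsaverage', scale=0.95)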
def scale_mri(subject_from, subject_to, scale, overwrite=False,
subjects_dir=None, skip_fiducials=False, labels=True,
annot=False):
"""Create a scaled copy of an MRI subject.
Parameters
----------
subject_from : str
Name of the subject providing the MRI.
subject_to : str
New subject name for which to save the scaled MRI.
scale : float | array_like, shape = (3,)
The scaling factor (one or 3 parameters).
overwrite : bool
If an MRI already exists for subject_to, overwrite it.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
skip_fiducials : bool
Do not scale the MRI fiducials. If False (default), an IOError will be
raised if no fiducials file can be found.
labels : bool
Also scale all labels (default True).
annot : bool
Copy ``*.annot`` files to the new location (default False).
See Also
--------
scale_labels : add labels to a scaled MRI
scale_source_space : add a source space to a scaled MRI
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = _find_mri_paths(subject_from, skip_fiducials, subjects_dir)
scale = np.asarray(scale)
# make sure we have an empty target directory
dest = subject_dirname.format(subject=subject_to,
subjects_dir=subjects_dir)
if os.path.exists(dest):
if overwrite:
shutil.rmtree(dest)
else:
raise IOError("Subject directory for %s already exists: %r"
% (subject_to, dest))
# create empty directory structure
for dirname in paths['dirs']:
dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)
os.makedirs(dir_)
# save MRI scaling parameters
fname = os.path.join(dest, 'MRI scaling parameters.cfg')
_write_mri_config(fname, subject_from, subject_to, scale)
# surf files [in mm]
for fname in paths['surf']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
pts, tri = read_surface(src)
write_surface(dest, pts * scale, tri)
# BEM files [in m]
for bem_name in paths['bem']:
scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir)
# fiducials [in m]
for fname in paths['fid']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
pts, cframe = read_fiducials(src)
for pt in pts:
pt['r'] = pt['r'] * scale
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
write_fiducials(dest, pts, cframe)
# duplicate files
for fname in paths['duplicate']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
shutil.copyfile(src, dest)
# source spaces
for fname in paths['src']:
src_name = os.path.basename(fname)
scale_source_space(subject_to, src_name, subject_from, scale,
subjects_dir)
# labels [in m]
os.mkdir(os.path.join(subjects_dir, subject_to, 'label'))
if labels:
scale_labels(subject_to, subject_from=subject_from, scale=scale,
subjects_dir=subjects_dir)
# copy *.annot files (they don't contain scale-dependent information)
if annot:
src_pattern = os.path.join(subjects_dir, subject_from, 'label',
'*.annot')
dst_dir = os.path.join(subjects_dir, subject_to, 'label')
for src_file in iglob(src_pattern):
shutil.copy(src_file, dst_dir)
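# Editor's illustration (hypothetical names and values): create a uniformly scaled
# copy of fsaverage including its labels, skipping fiducials if none are present.
#
#     scale_mri('fsaverage', 'fsaverage_small', scale=0.9, labels=True,
#               skip_fiducials=True)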
def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
subjects_dir=None, n_jobs=1):
"""Scale a source space for an mri created with scale_mri().
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
src_name : str
Source space name. Can be a spacing parameter (e.g., ``'7'``,
``'ico4'``, ``'oct6'``) or a file name of a source space file relative
to the bem directory; if the file name contains the subject name, it
should be indicated as "{subject}" in ``src_name`` (e.g.,
``"{subject}-my_source_space-src.fif"``).
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
        Scaling factor. Has to be specified if subject_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
n_jobs : int
Number of jobs to run in parallel if recomputing distances (only
applies if scale is an array of length 3, and will not use more cores
than there are source spaces).
"""
subjects_dir, subject_from, scale, nn_scale = \
_scale_params(subject_to, subject_from, scale, subjects_dir)
# find the source space file names
if src_name.isdigit():
spacing = src_name # spacing in mm
src_pattern = src_fname
else:
match = re.match("(oct|ico)-?(\d+)$", src_name)
if match:
spacing = '-'.join(match.groups())
src_pattern = src_fname
else:
spacing = None
src_pattern = os.path.join(bem_dirname, src_name)
src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from,
spacing=spacing)
dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to,
spacing=spacing)
# read and scale the source space [in m]
sss = read_source_spaces(src)
logger.info("scaling source space %s: %s -> %s", spacing, subject_from,
subject_to)
logger.info("Scale factor: %s", scale)
add_dist = False
for ss in sss:
ss['subject_his_id'] = subject_to
ss['rr'] *= scale
# distances and patch info
if nn_scale is None: # i.e. uniform scaling
if ss['dist'] is not None:
ss['dist'] *= scale
ss['nearest_dist'] *= scale
ss['dist_limit'] *= scale
else: # non-uniform scaling
ss['nn'] *= nn_scale
_normalize_vectors(ss['nn'])
if ss['dist'] is not None:
add_dist = True
if add_dist:
logger.info("Recomputing distances, this might take a while")
dist_limit = np.asscalar(sss[0]['dist_limit'])
add_source_space_distances(sss, dist_limit, n_jobs)
write_source_spaces(dst, sss)
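# Editor's illustration (hypothetical names): rescale an ico-4 source space that was
# originally set up for fsaverage; spacing-style names resolve via src_fname, while
# any other name is treated as a file inside the subject's bem directory.
#
#     scale_source_space('fsaverage_scaled', 'ico4', subject_from='fsaverage')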
| bsd-3-clause | -3,178,512,339,683,785,700 | 35.018441 | 79 | 0.600931 | false |
adybbroe/atrain_match | python_edit_the_code.py | 1 | 3570 | import re, glob, os
ROOT_DIR = "/home/a001865/git/rename_atrainmatch/atrain_match/"
files = glob.glob(ROOT_DIR + "/*.py")
files = files + glob.glob(ROOT_DIR + "/*/*.py")
files = files + glob.glob(ROOT_DIR + "/*/*/*.py")
var_name_dict ={
"time": "profile_time",
"utc_time": "profile_utc_time",
#"cloud_top_profile": "layer_top_altitude",
#"cloud_top_profile_pressure":"layer_top_pressure",
#"cloud_base_profile": "layer_base_altitude",
#"number_of_layers_found": "number_layers_found",
"elevation": "dem_surface_elevation",
#"igbp": "igbp_surface_type",
#"nsidc": "nsidc_surface_type",
"optical_depth": "feature_optical_depth_532"}
for filename in files:
    if os.path.basename(filename) == "python_edit_the_code.py":
        print "do not edit %s"%(os.path.basename(filename))
        continue
print filename
all_file=""
python_file = open(filename,'r')
for line in python_file:
#line = line.replace("avhrr", "imager")
#line = line.replace("AVHRR", "IMAGER")
#line = line.replace("Avhrr", "Imager")
#line = line.replace("nnImager", "nnAvhrr")
#line = line.replace("nnavhrr", "nnimager")
#line = line.replace("NN-IMAGER", "NN-AVHRR")
line = line.replace("cloudsat_calipso_imager", "truth_imager")
#if "_amsr" not in line:
# line = line.replace("amsr_imager", "match_util")
line = line.replace("match_match_util", "match_amsr_imager")
#line = re.sub(r"\.elevation", '.DEM_surface_elevation',line)
#if re.search("alipso\.elevation",line) and 1==2:
# line = line.rstrip()
# line = re.sub(r"alipso\.elevation",
# 'alipso.dem_surface_elevation',line)
# line = re.sub(r"alipsoObj\.elevation",
# 'alipsoObj.dem_surface_elevation',line)
#
# line = line + "\n"
#
#line = re.sub(r"nsidc", 'nsidc_surface_type',line)
#line = re.sub(r"igbp", 'igbp_surface_type',line)
#line = re.sub(r"number_of_layers_found", 'number_layers_found',line)
#line = re.sub(r"cloud_top_profile_pressure",
# 'layer_top_pressure',line)
#line = re.sub(r"cloud_base_profile",
# 'layer_base_altitude',line)
#line = re.sub(r"cloud_top_profile",
# 'layer_top_altitude',line)
#line = re.sub(r"\.optical_depth",
# '.feature_optical_depth_532',line)
#line = re.sub(r"\"optical_depth",
# '"feature_optical_depth_532',line)
#line = re.sub(r"\'optical_depth",
# '\'feature_optical_depth_532',line)
#line = re.sub(r"utc_time",
# 'profile_utc_time',line)
#line = re.sub(r"time_tai",
# 'profile_time_tai',line)
line = re.sub(r"feature_optical_depth_532_top_layer5km",
'feature_optical_depth_532_top_layer_5km',line)
"""Maybe not do this!!
line = re.sub(r"alipso\.time",
'alipso.profile_time',line)
line = re.sub(r"cal\.time",
'cal.profile_time',line)
line = re.sub(r"alipsoObj\.time",
'alipsoObj.profile_time',line)
"""
all_file += line
python_file.close()
python_file = open(filename,'w')
python_file.write(all_file)
| gpl-3.0 | 4,090,333,391,999,040,500 | 41 | 77 | 0.527451 | false |
jhlee525/janna | janna/streamer/batch.py | 1 | 4174 | from .base import *
from ..logger import logger
import numpy as np
from itertools import count
class Batch(StreamerBase):
_count = count(0)
def __init__(self, prev, batch_size, squeeze=False, name=None):
if not isinstance(batch_size, int):
raise KeyError('Batch size must be integer')
if batch_size <= 0:
raise KeyError('Batch size must be positive integer')
assert isinstance(name, (type(None), str))
# Naming
if name is None:
name = 'batch_' + str(next(Batch._count))
super(Batch, self).__init__(prev, BatchRunner(self, batch_size, squeeze), name)
self._batch_size = batch_size
self._squeeze = squeeze
def __len__(self):
if self._squeeze:
v = super(Batch, self).__len__() // self._batch_size
v = v + 1 if super(Batch, self).__len__() % self._batch_size > 0 else v
else:
v = super(Batch, self).__len__() // self._batch_size
return v
def process(self, sample):
sample_out = {}
for s in sample:
for k, v in s.items():
if k not in sample_out:
sample_out[k] = [v]
else:
sample_out[k].append(v)
for k, v in sample_out.items():
try:
sample_out[k] = np.stack(v, axis=0)
except:
continue
return sample_out
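# Editor's note (comment added for clarity, not in the original source): process()
# collects the values of each key across the batched samples, so two samples
# {'x': a1} and {'x': a2} become {'x': np.stack([a1, a2], axis=0)}; keys whose
# values cannot be stacked (ragged shapes, non-array objects) stay plain Python
# lists because np.stack is wrapped in try/except.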
class BatchRunner(RunnerBase):
"""
Enables many-to-one get
"""
def __init__(self, node, chunk_size, squeeze):
super(BatchRunner, self).__init__(node)
self._queue = deque()
self._invalid_queue = deque()
self._chunk_size = chunk_size
self._squeeze = squeeze
self._is_last_batch = False
def reset(self):
super(BatchRunner, self).reset()
self.status = READY
self._queue.clear()
self._invalid_queue.clear()
self._is_last_batch = False
def get(self):
if len(self._invalid_queue) > 0:
return self._invalid_queue.popleft()
if self._is_last_batch is True:
if self._squeeze:
v_input = []
for _ in range(len(self._queue)):
v_input.append(self._queue.popleft())
v = self._node.process(v_input)
self.status = DRAINED
return v
else:
v = self._queue.popleft() if len(self._queue) > 0 else None
if v is not None:
logger.debug('{}: Sample {} discarded due to insufficient batch size'
.format(self._node.name, v['__id__']))
v['__invalid__'] = True
self.status = DRAINED if len(self._queue) == 0 else READY
return v
elif len(self._queue) >= self._chunk_size:
v_input = []
for _ in range(self._chunk_size):
v_input.append(self._queue.popleft())
v = self._node.process(v_input)
self.status = WAITING
return v
else:
self.status = WAITING
return None
def put(self, sample):
if sample['__invalid__'] is True:
self._invalid_queue.append(sample)
else:
self._queue.append(sample)
def update(self):
node = self._node
predecessors_status = [v.runner.status for v in node.graph.predecessors(node)]
all_drained_test = predecessors_status.count(DRAINED) == len(predecessors_status)
queue_empty_test = len(self._queue) < self._chunk_size
invalid_queue_empty_test = len(self._invalid_queue) == 0
if not invalid_queue_empty_test:
self.status = READY
elif all_drained_test and queue_empty_test:
if len(self._queue) > 0:
self._is_last_batch = True
self.status = READY
else:
self.status = DRAINED
elif queue_empty_test:
self.status = WAITING
else:
self.status = READY
def finalize(self):
pass
| mit | 8,256,329,126,504,383,000 | 32.392 | 89 | 0.516291 | false |
openwsn-berkeley/range_test | raspberry/experiment_tx.py | 1 | 23331 | """
Transmission script of the range test.
author Jonathan Munoz ([email protected]), January 2017
"""
import time
import logging
import threading
import sys
import sched
import Queue
import json
from datetime import datetime as dt
import datetime
import socket
from threading import Timer
import at86rf215_defs as defs
import at86rf215_driver as radio
import GpsThread as gps
import gpio_handler as gpio
FRAME_LENGTH = 2047
CRC_SIZE_LEGACY = 2
CRC_SIZE_154G = 2
SECURITY_TIME = 3 # 3 seconds to give more time to TRX to complete the 400 frame bursts.
START_OFFSET = 4 # 4 seconds after the starting time arrives.
MODEM_SUB_GHZ = 0
MODEM_2GHZ = 1
COUNTER_LENGTH = 2
class LoggerTx(threading.Thread):
def __init__(self, queue, settings):
# store parameters
self.queue = queue
self.settings = settings
# local variables
self.name_file = '/home/pi/range_test_raw_data_ofdm_vs_oqpsk/experiments_results_' + socket.gethostname() +\
'.json'
self.results = {'type': 'end_of_cycle_tx', 'start_time_str': time.strftime("%a, %d %b %Y %H:%M:%S UTC",
time.gmtime()),
'start_time_epoch': time.time(), 'radio_settings': None, 'GPSinfo_at_start': None,
'version': self.settings['version'], 'channel': None, 'frequency_0': None,
'burst_size': self.settings['numframes'], 'id': socket.gethostname()}
# start the thread
threading.Thread.__init__(self)
self.name = 'LoggerTx'
self.daemon = True
self.start()
logging.basicConfig(stream=sys.__stdout__, level=logging.DEBUG)
def run(self):
while True:
item = self.queue.get()
if item == 'Start':
if self.results['radio_settings']:
with open(self.name_file, 'a') as f:
f.write(json.dumps(self.results.copy())+'\n')
self.results['start_time_str'] = time.strftime("%a, %d %b %Y %H:%M:%S UTC", time.gmtime())
self.results['start_time_epoch'] = time.time()
elif item == 'Print last':
with open(self.name_file, 'a') as f:
f.write(json.dumps(self.results.copy())+'\n')
elif type(item) is tuple:
logging.info('Time to send the frames {0} - {1} was {2} seconds\n'.format(item[0] - 100, item[0],
item[1]))
elif type(item) is dict:
if item.get('frequency_0_kHz') is not None:
self.results['frequency_0'] = item['frequency_0_kHz']
self.results['channel'] = item['channel']
self.results['radio_settings'] = item['modulation']
else:
self.results['GPSinfo_at_start'] = item
elif type(item) is float:
logging.info('Time {0}'.format(item))
else:
logging.error('UNKNOWN ITEM IN THE QUEUE: {0}.'.format(item))
class ExperimentTx(threading.Thread):
def __init__(self, settings):
# local variables
self.settings = settings
self.queue_tx = Queue.Queue()
self.f_start_signal_LED = False
self.f_reset_button = False
self.f_exit = False
self.f_cancel_exp = False
self.hours = 0
self.minutes = 0
self.scheduler = sched.scheduler(time.time, time.sleep)
self.list_events_sched = [None for i in range(len(self.settings["test_settings"]))]
self.schedule_time = ['time' for i in range(len(self.settings["test_settings"]))]
self.led_array_pins = [29, 31, 33, 35, 37]
self.TRX_frame_pin = [36]
self.radio_isr_pin = 11
self.push_button_pin = 13
self.scheduler_aux = None
self.time_to_start = None
self.started_time = None
self.experiment_tx_led_start = None
self.experiment_scheduled = None
self.experiment_tx_thread = None
self.experiment_counter = 0
self.modem_base_band_state = MODEM_SUB_GHZ
self.dataLock = threading.RLock()
# start the threads
self.f_reset = threading.Event()
self.start_experiment = threading.Event()
self.end_experiment = threading.Event()
self.f_schedule = threading.Event()
self.f_reset.clear()
self.start_experiment.clear()
self.end_experiment.clear()
self.f_schedule.clear()
self.radio_driver = None
self.LoggerTx = None
self.gps = None
self.gpio_handler = None
# start all the drivers
# self._gps_init()
logging.info('radio setup')
self._radio_setup()
logging.info('logger init')
self._logger_init()
logging.info('gpio handler init')
self._gpio_handler_init()
logging.info('radio init')
self._radio_init()
logging.debug('INIT COMPLETE')
# start the thread
threading.Thread.__init__(self)
self.name = 'ExperimentTx_'
self.daemon = True
self.start()
# configure the logging module
# logging.basicConfig(stream=sys.__stdout__, level=logging.WARNING)
# ====================== private =========================================
def _radio_setup(self):
# initialize the radio driver
self.radio_driver = radio.At86rf215(None, None)
self.radio_driver.spi_init()
def _radio_init(self):
self.radio_driver.radio_reset()
self.radio_driver.read_isr_source() # no functional role, just clear the pending interrupt flag
def _gps_init(self):
logging.debug('in of GPS init')
# start the gps thread
self.gps = gps.GpsThread()
# waiting until the GPS time is valid
logging.info('waiting for valid GPS time...')
while self.gps.is_gps_time_valid() is False:
time.sleep(1)
logging.info('... time valid')
logging.debug('out of GPS init')
def _logger_init(self):
# initializes the LoggerRx thread
self.LoggerTx = LoggerTx(self.queue_tx, self.settings)
def _gpio_handler_init(self):
logging.info('gpio init!')
self.gpio_handler = gpio.GPIO_handler(self.radio_isr_pin, self.push_button_pin,
self.radio_driver.cb_radio_isr,
self._cb_push_button)
self.gpio_handler.init_binary_pins(self.led_array_pins)
self.gpio_handler.init_binary_pins(self.TRX_frame_pin)
self.gpio_handler.led_off(self.TRX_frame_pin)
self.gpio_handler.binary_counter(0, self.led_array_pins)
logging.info('GPIO INIT END')
def _start_time_experiment(self):
"""
it sets the next runtime for the whole experiment sequence in hours, minutes
current_time[3] = hours, current_time[4] = minutes, current_time[5] = seconds
:return: hours, minutes
"""
current_time = time.gmtime()
if current_time[5] < 50:
if current_time[4] is not 59:
new_time = current_time[3], current_time[4] + 1
else:
new_time = (current_time[3] + 1) % 24, 0
else:
if current_time[4] is 59:
new_time = (current_time[3] + 1) % 24, 1
else:
new_time = current_time[3], current_time[4] + 2
return new_time
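        # Editor's worked example (added for clarity): at 10:37:20 UTC the seconds
        # are below 50, so this returns (10, 38); at 10:59:55 the seconds are >= 50
        # and the minutes are 59, so it rolls over to the next hour, returning (11, 1).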
def _stop_exp(self):
"""
        it triggers printing of the last modulation results
"""
self.queue_tx.put('Print last')
with self.dataLock:
self.end_experiment.set()
logging.info('before the led_end_experiment_signal, time: {0}, thread: {1}'.format(time.time(),
threading.current_thread()))
self._led_end_experiment_signal()
logging.debug('END OF EXPERIMENTS')
def _experiment_scheduling(self):
self.time_next_experiment = self.settings['test_settings'][self.experiment_counter % len(
self.settings['test_settings'])]['durationtx_s'] + SECURITY_TIME
logging.info('time of next experiment {0}, setting: {1}'.format(self.time_next_experiment, self.settings[
'test_settings'][self.experiment_counter % len(self.settings['test_settings'])]))
self.experiment_scheduled = Timer(self.time_next_experiment, self._experiment_scheduling, ())
self.experiment_scheduled.start()
self.experiment_tx_thread = threading.Thread(target=self._execute_experiment_tx, args=[self.settings[
'test_settings'][self.experiment_counter % len(self.settings['test_settings'])]])
self.experiment_tx_thread.start()
self.experiment_tx_thread.name = 'TX 1000 packets'
self.experiment_counter += 1
def _modem_2ghz(self):
self.modem_base_band_state = MODEM_2GHZ
def _execute_experiment_tx(self, item):
# TODO: do this as in the experiment RX
""""
:param item
"""
logging.info('start time TX 1000 : {0}'.format(time.time()))
total_time = time.time()
# logging.debug('entering _execute_experiment_tx, time: {0}, {1}'.format(time.time(), item['modulation']))
self.gpio_handler.led_off(self.TRX_frame_pin)
# clean the break _execute_experiment_tx flag
self.f_cancel_exp = False
self.queue_tx.put(time.time() - self.started_time)
self.gpio_handler.binary_counter(0, self.led_array_pins)
# initialize the frame counter
frame_counter = 0
# reset the radio to erase previous configuration
self.radio_driver.radio_reset()
# re-configure the radio
self.radio_driver.radio_write_config(defs.modulations_settings[item['modulation']])
# select the frequency
# if self.modem_base_band_state == MODEM_SUB_GHZ:
logging.debug('ITEM: {0}'.format(item))
if item['modem'] == "subGHz":
self.radio_driver.radio_off()
self.radio_driver.radio_set_frequency((item['channel_spacing_kHz'],
item['frequency_0_kHz'],
item['channel']))
elif item['modem'] == "2.4GHz":
self.radio_driver.radio_off_2_4ghz()
self.radio_driver.radio_set_frequency_2_4ghz((item['channel_spacing_kHz'],
item['frequency_0_kHz'],
item['channel']))
else:
            logging.critical('ERROR')
self.gpio_handler.binary_counter(item['index'], self.led_array_pins)
logging.info('modulation: {0}, channel: {1}'.format(item["modulation"], item["channel"]))
# let know to the informative class the beginning of a new experiment
self.queue_tx.put('Start')
# log the config name
self.queue_tx.put(item)
# log GPS info
# self.queue_tx.put(self.gps.gps_info_read())
# if self.modem_base_band_state == MODEM_SUB_GHZ:
if item['standard'] == '802.15.4g':
# loop through packet lengths
for frame_length in self.settings["frame_lengths_15.4g"]:
# check if the reset button has been pressed
# logging.warning('self.radio_driver.read_reset_cmd(): {0}'.format(self.radio_driver.read_reset_cmd()))
if self.f_cancel_exp:
break
if item['modem'] == 'subGHz':
self.radio_driver.radio_trx_enable()
else:
self.radio_driver.radio_trx_enable_2_4ghz()
# send burst of frames
for i in range(self.settings['numframes']):
# create frame
frameToSend = [frame_counter >> 8, frame_counter & 0xFF] + [i & 0xFF for i in range(FRAME_LENGTH -
COUNTER_LENGTH)]
# increment the frame counter
frame_counter += 1
# send frame
if item['modem'] == 'subGHz':
self.radio_driver.radio_load_packet(frameToSend[:frame_length - CRC_SIZE_154G], CRC_SIZE_154G)
self.radio_driver.radio_tx_now()
else:
self.radio_driver.radio_load_packet_2_4ghz(frameToSend[:frame_length - CRC_SIZE_154G],
CRC_SIZE_154G)
self.radio_driver.radio_tx_now_2_4ghz()
# IFS
time.sleep(self.settings['IFS'])
self.gpio_handler.led_toggle(self.TRX_frame_pin)
# logging.warning('self.radio_driver.read_reset_cmd(): {0}'.format(self.radio_driver.read_reset_cmd()))
if self.f_cancel_exp:
break
# logging.info('EXIT FROM THE _execute_experiment_tx: {0}, {1}'.format(time.time(), item['modulation']))
logging.info('DURATION OF {0} is: {1}'.format(item["modulation"], (time.time() - total_time)))
# standard is IEEE802.15.4-2006
else:
# loop through packet lengths
for frame_length in self.settings["frame_lengths_15.4-2006"]:
# check if the reset button has been pressed
# logging.warning('self.radio_driver.read_reset_cmd(): {0}'.format(self.radio_driver.read_reset_cmd()))
if self.f_cancel_exp:
break
self.radio_driver.radio_trx_enable_2_4ghz()
# send burst of frames
for i in range(self.settings['numframes']):
# create frame
frameToSend = [frame_counter >> 8, frame_counter & 0xFF] + [i & 0xFF for i in
range(FRAME_LENGTH - COUNTER_LENGTH)]
# increment the frame counter
frame_counter += 1
# send frame
self.radio_driver.radio_load_packet_2_4ghz(frameToSend[:frame_length - CRC_SIZE_LEGACY],
CRC_SIZE_LEGACY)
self.radio_driver.radio_tx_now_2_4ghz()
# IFS
time.sleep(self.settings["IFS"])
self.gpio_handler.led_toggle(self.TRX_frame_pin)
# logging.warning('self.radio_driver.read_reset_cmd(): {0}'.format(self.radio_driver.read_reset_cmd()))
if self.f_cancel_exp:
break
# logging.info('EXIT FROM THE _execute_experiment_tx: {0}, {1}'.format(time.time(), item['modulation']))
logging.info('DURATION OF {0} is: {1}'.format(item["modulation"], (time.time() - total_time)))
self.radio_driver.radio_off_2_4ghz()
self.radio_driver.radio_off()
def _remove_scheduled_experiment(self):
events = self.scheduler.queue
for ev in events:
self.scheduler.cancel(ev)
def _led_end_experiment_signal(self):
i = 0
for led in self.led_array_pins:
self.gpio_handler.led_off(led)
while i < 20 and not self.f_reset.is_set():
for led in self.led_array_pins:
self.gpio_handler.led_toggle(led)
time.sleep(1)
i += 1
def _led_start_experiment_signal(self):
"""
        it lights up an LED if the experiment will take place in the next minute
it uses the frame receive LED to indicate whether the experiment is going to start the next minute or not.
:return:
"""
logging.debug('entering led_start_experiment_signal')
while not self.f_start_signal_LED:
now = time.gmtime()
if self.minutes - now[4] == 1 or self.minutes - now[4] == -59:
logging.debug('SWITCHING LIGHT UP led_start_experiment_signal')
self.gpio_handler.led_on(self.TRX_frame_pin)
self.f_start_signal_LED = True
continue
time.sleep(1)
self.f_start_signal_LED = False
logging.debug('OUTING led_start_experiment_signal')
def run(self):
# setup the radio
# self._radio_setup()
logging.info('WAITING FOR THE START BUTTON TO BE PRESSED')
# push button signal
# self.start_experiment.wait()
# self.start_experiment.clear()
# gets current time and determines the running time for the experiment to start
self.started_time = time.time()
self.hours, self.minutes = self._start_time_experiment()
self.time_to_start = dt.combine(dt.now(), datetime.time(self.hours, self.minutes))
self.radio_driver.radio_off()
self.gpio_handler.led_off(self.TRX_frame_pin)
# self.gpio_handler.binary_counter(0, self.led_array_pins)
if self.experiment_scheduled:
logging.debug('cancelling experiment')
self.experiment_scheduled.cancel()
self.experiment_counter = 0
self.experiment_scheduled = Timer(
time.mktime(self.time_to_start.timetuple()) + START_OFFSET - time.time(),
self._experiment_scheduling, ())
self.experiment_scheduled.start()
logging.info('time left for the experiment to start: {0}'.format(time.mktime(self.time_to_start.timetuple())
+ START_OFFSET - time.time()))
logging.info('time to start experiment: {0}'.format(self.time_to_start.timetuple()))
self.experiment_tx_led_start = threading.Thread(target=self._led_start_experiment_signal)
self.experiment_tx_led_start.start()
self.experiment_tx_led_start.name = 'Experiment Rx thread start led signal'
# # it start the scheduler thread
# self.scheduler_aux = threading.Thread(target=self._experiment_scheduling)
# self.scheduler_aux.start()
# self.scheduler_aux.name = 'Scheduler Tx'
# logging.debug('waiting the end of the experiment')
# gives the signal to the scheduler to start scheduling the 31 experiments
# with self.dataLock:
# self.f_schedule.set()
# it will switch on the LED frame_received_pin to let the user know the experiment will start the following
# minute
# self.experiment_tx_led_start = threading.Thread(target=self._led_start_experiment_signal)
# self.experiment_tx_led_start.start()
# self.experiment_tx_led_start.name = 'Experiment Tx thread start led signal'
#
# while True:
#
# # it waits for the self.end_experiment signal that can be triggered at the end of the 31st experiment
# # or when the push button is pressed
# self.end_experiment.wait()
# self.end_experiment.clear()
# logging.info('END of the experiment, is self.end_experiment set? {0}'.format(self.end_experiment.is_set()))
#
# # if push button, removes all the experiments scheduled
# self.f_reset.wait()
# self.f_reset.clear()
# logging.info('RESET experiment, is self.f_reset set? {0}'.format(self.f_reset.is_set()))
#
# self.gpio_handler.led_off(self.TRX_frame_pin)
# logging.info('button pressed')
# logging.debug('RESETTING SCHEDULE')
# self._remove_scheduled_experiment()
# logging.debug('removed items in the queue')
# self.started_time = time.time()
#
# # determines the starting time for the new set of experiments
# self.hours, self.minutes = self._start_time_experiment()
# self.time_to_start = dt.combine(dt.now(), datetime.time(self.hours, self.minutes))
# logging.debug('WITHIN THE WHILE TRUE MAIN --->> self.time_to_start: {0}'.format(self.time_to_start))
# # self.gpio_handler.binary_counter(0, self.led_array_pins)
# self.experiment_tx_led_start = threading.Thread(target=self._led_start_experiment_signal)
# self.experiment_tx_led_start.start()
# self.experiment_tx_led_start.name = 'Experiment Tx thread start led signal'
# ======================== callbacks =======================================
def _cb_push_button(self, channel=13):
# pass
self.gpio_handler.clear_cb(13)
# switch on all leds to let the user know the push button has been pressed and it got the signal.
self.gpio_handler.binary_counter(31, self.led_array_pins)
if not self.f_reset_button:
with self.dataLock:
self.start_experiment.set()
self.f_reset_button = True
else:
logging.warning('RESET BUTTON PRESSED')
with self.dataLock:
self.end_experiment.set()
self.f_schedule.set()
self.f_reset.set()
self.f_cancel_exp = True
logging.info('f_reset set to true?: {0}'.format(self.f_reset.isSet()))
time.sleep(1)
self.gpio_handler.add_cb(self._cb_push_button, self.push_button_pin)
# ============================ main ==========================================
def load_experiment_details():
with open('/home/pi/range_test/raspberry/experiment_settings_outdoors_range_test.json', 'r') as f:
settings = f.read().replace('\n', ' ').replace('\r', '')
settings = json.loads(settings)
return settings
def main():
f_start = False
logging.basicConfig(stream=sys.__stdout__, level=logging.INFO)
# experimentTx = ExperimentTx(load_experiment_details())
while True:
input = raw_input('>')
if input == 's':
if not f_start:
f_start = True
logging.info('PROGRAM STARTING...')
experimentTx = ExperimentTx(load_experiment_details())
logging.info('PROGRAM RUNNING')
else:
logging.info('PROGRAM ALREADY STARTED')
if input == 'q':
if f_start:
experimentTx.gpio_handler.clean_gpio()
sys.exit(0)
if __name__ == '__main__':
main()
| bsd-2-clause | -7,964,004,491,712,060,000 | 41.730769 | 123 | 0.541983 | false |
jeromecc/doctoctocbot | src/moderation/twitter/user.py | 1 | 2364 | from moderation.models import SocialUser
from bot.tweepy_api import get_api as get_tweepy_api
from tweepy import TweepError
from tweepy.models import User as TweepyUser
from community.models import Community
import logging
from community.helpers import get_community_twitter_tweepy_api
import time
logger = logging.getLogger(__name__)
class TwitterUser:
def __init__(self, userid=None, socialuser=None):
try:
if userid and socialuser:
if socialuser.user_id != userid:
raise ValueError("userid and socialuser.user_id mismatch!")
self.id = userid
self.socialuser = socialuser
elif userid and not socialuser:
try:
su = SocialUser.objects.get(user_id=userid)
self.id=userid
self.socialuser=su
except SocialUser.DoesNotExist:
pass
elif not userid and socialuser:
self.id = socialuser.user_id
self.socialuser = socialuser
except ValueError:
pass
def __str__(self):
return f"TwitterUser id: {self.id}"
def is_protected(self):
try:
protected = self.socialuser.profile.json.get("protected")
except AttributeError:
protected = None
return protected
def friend(self, community):
if not isinstance(community, Community):
logger.error(
f"Given parameter {community} is not a Community object"
)
return
api = get_community_twitter_tweepy_api(
community = community,
backend=True
)
try:
tweepy_user = api.create_friendship(user_id=self.id)
logger.debug(tweepy_user)
if isinstance(tweepy_user, TweepyUser):
return True
except TweepError:
return False
def decline_follow_request(self, community):
api = get_community_twitter_tweepy_api(
community = community,
backend=True
)
resp = api.create_block(user_id=self.id)
logger.debug(resp)
time.sleep(1)
resp = api.destroy_block(user_id=self.id)
logger.debug(resp)
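        # Editor's note (added comment): the Twitter API used here has no dedicated
        # "decline follow request" call, so the pending request is dropped as a side
        # effect of briefly blocking and then unblocking the requester.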
| mpl-2.0 | 6,726,107,315,724,089,000 | 30.118421 | 79 | 0.565144 | false |
bugzPDX/airmozilla | airmozilla/manage/autocompeter.py | 1 | 5110 | import datetime
import json
import time
import sys
from pprint import pprint
import requests
from django.conf import settings
from django.utils import timezone
from django.db.models import Count
from django.core.exceptions import ImproperlyConfigured
from funfactory.urlresolvers import reverse
from airmozilla.main.models import Event, EventHitStats
def _get_url():
return getattr(
settings,
'AUTOCOMPETER_URL',
'https://autocompeter.com/v1'
)
def update(
verbose=False, all=False, flush_first=False, max_=1000,
since=datetime.timedelta(minutes=60),
out=sys.stdout,
):
if not getattr(settings, 'AUTOCOMPETER_KEY', None):
if verbose: # pragma: no cover
print >>out, "Unable to submit titles to autocompeter.com"
print >>out, "No settings.AUTOCOMPETER_KEY set up"
return
autocompeter_url = _get_url()
if flush_first:
assert all, "must be all if you're flushing"
t0 = time.time()
response = requests.delete(
autocompeter_url + '/flush',
headers={
'Auth-Key': settings.AUTOCOMPETER_KEY,
},
verify=not settings.DEBUG
)
t1 = time.time()
if verbose: # pragma: no cover
print >>out, response
print >>out, "Took", t1 - t0, "seconds to flush"
assert response.status_code == 204, response.status_code
now = timezone.now()
if all:
hits_map = dict(
EventHitStats.objects.all().values_list('event', 'total_hits')
)
values = hits_map.values()
if values:
median_hits = sorted(values)[len(values) / 2]
else:
median_hits = 0
events = Event.objects.approved()
else:
events = (
Event.objects.approved()
.filter(modified__gte=now-since)[:max_]
)
if events:
# there are events, we'll need a hits_map and a median
hits_map = dict(
EventHitStats.objects.filter(event__in=events)
.values_list('event', 'total_hits')
)
values = (
EventHitStats.objects.all()
.values_list('total_hits', flat=True)
)
if values:
median_hits = sorted(values)[len(values) / 2]
else:
median_hits = 0
title_counts = {}
# Only bother to set this up if there are events to loop over.
# Oftentimes the cronjob will trigger here with no new recently changed
# events and then the loop below ('for event in events:') will do nothing.
if events:
grouped_by_title = (
Event.objects.approved().values('title').annotate(Count('title'))
)
for each in grouped_by_title:
title_counts[each['title']] = each['title__count']
documents = []
for event in events:
url = reverse('main:event', args=(event.slug,))
title = event.title
if event.start_time > now:
# future events can be important too
popularity = median_hits
else:
hits = hits_map.get(event.id, 0)
popularity = hits
if event.privacy == Event.PRIVACY_PUBLIC:
group = ''
else:
group = event.privacy
if title_counts[title] > 1:
title = '%s %s' % (title, event.start_time.strftime('%d %b %Y'))
documents.append({
'title': title,
'url': url,
'popularity': popularity,
'group': group,
})
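    # Editor's illustration (hypothetical values, added for clarity): each submitted
    # document looks roughly like
    #   {'title': 'Intro talk 01 Jan 2015', 'url': '/some-event-url/',
    #    'popularity': 42, 'group': ''}
    # where non-public events carry their privacy level in 'group' and duplicate
    # titles are disambiguated with the event's start date.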
if verbose: # pragma: no cover
pprint(documents, stream=out)
if not documents:
if verbose: # pragma: no cover
print >>out, "No documents."
return
t0 = time.time()
response = requests.post(
autocompeter_url + '/bulk',
data=json.dumps({'documents': documents}),
headers={
'Auth-Key': settings.AUTOCOMPETER_KEY,
},
verify=not settings.DEBUG
)
t1 = time.time()
assert response.status_code == 201, response.status_code
if verbose: # pragma: no cover
print >>out, response
print >>out, "Took", t1 - t0, "seconds to bulk submit"
def stats():
if not getattr(settings, 'AUTOCOMPETER_KEY', None):
raise ImproperlyConfigured("No settings.AUTOCOMPETER_KEY set up")
autocompeter_url = _get_url()
response = requests.get(
autocompeter_url + '/stats',
headers={
'Auth-Key': settings.AUTOCOMPETER_KEY,
},
verify=not settings.DEBUG
)
assert response.status_code == 200, response.status_code
return response.json()
def test(term, domain=None):
autocompeter_url = _get_url()
response = requests.get(
autocompeter_url,
params={
'd': domain or settings.AUTOCOMPETER_DOMAIN,
'q': term,
},
verify=not settings.DEBUG
)
assert response.status_code == 200, response.status_code
return response.json()
| bsd-3-clause | -4,856,101,207,579,082,000 | 28.709302 | 78 | 0.567515 | false |
ufal/neuralmonkey | neuralmonkey/evaluators/chrf.py | 1 | 3455 | from typing import List, Dict
from typeguard import check_argument_types
import numpy as np
from neuralmonkey.evaluators.evaluator import Evaluator
# pylint: disable=invalid-name
NGramDicts = List[Dict[str, int]]
# pylint: enable=invalid-name
class ChrFEvaluator(Evaluator[List[str]]):
"""Compute ChrF score.
See http://www.statmt.org/wmt15/pdf/WMT49.pdf
"""
def __init__(self,
n: int = 6,
beta: float = 1.0,
ignored_symbols: List[str] = None,
name: str = None) -> None:
check_argument_types()
if name is None:
name = "ChrF-{}".format(beta)
super().__init__(name)
self.n = n
self.beta_2 = beta**2
self.ignored = [] # type: List[str]
if ignored_symbols is not None:
self.ignored = ignored_symbols
def score_instance(self,
hypothesis: List[str],
reference: List[str]) -> float:
hyp_joined = " ".join(hypothesis)
hyp_chars = [x for x in list(hyp_joined) if x not in self.ignored]
hyp_ngrams = _get_ngrams(hyp_chars, self.n)
ref_joined = " ".join(reference)
ref_chars = [x for x in list(ref_joined) if x not in self.ignored]
ref_ngrams = _get_ngrams(ref_chars, self.n)
if not hyp_chars or not ref_chars:
if "".join(hyp_chars) == "".join(ref_chars):
return 1.0
return 0.0
precision = self.chr_p(hyp_ngrams, ref_ngrams)
recall = self.chr_r(hyp_ngrams, ref_ngrams)
if precision == 0.0 and recall == 0.0:
return 0.0
return ((1 + self.beta_2) * (precision * recall)
/ ((self.beta_2 * precision) + recall))
def chr_r(self, hyp_ngrams: NGramDicts, ref_ngrams: NGramDicts) -> float:
count_all = np.zeros(self.n)
count_matched = np.zeros(self.n)
for m in range(1, self.n + 1):
for ngr in ref_ngrams[m - 1]:
ref_count = ref_ngrams[m - 1][ngr]
count_all[m - 1] += ref_count
if ngr in hyp_ngrams[m - 1]:
count_matched[m - 1] += min(
ref_count, hyp_ngrams[m - 1][ngr])
return np.mean(np.divide(
count_matched, count_all, out=np.ones_like(count_all),
where=(count_all != 0)))
def chr_p(self, hyp_ngrams: NGramDicts, ref_ngrams: NGramDicts) -> float:
count_all = np.zeros(self.n)
count_matched = np.zeros(self.n)
for m in range(1, self.n + 1):
for ngr in hyp_ngrams[m - 1]:
hyp_count = hyp_ngrams[m - 1][ngr]
count_all[m - 1] += hyp_count
if ngr in ref_ngrams[m - 1]:
count_matched[m - 1] += min(
hyp_count, ref_ngrams[m - 1][ngr])
return np.mean(np.divide(
count_matched, count_all, out=np.ones_like(count_all),
where=(count_all != 0)))
def _get_ngrams(tokens: List[str], n: int) -> NGramDicts:
ngr_dicts = []
for m in range(1, n + 1):
ngr_dict = {} # type: Dict[str, int]
for i in range(m, len(tokens) + 1):
ngr = "".join(tokens[i - m:i])
ngr_dict[ngr] = ngr_dict.setdefault(ngr, 0) + 1
ngr_dicts.append(ngr_dict)
return ngr_dicts
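# Editor's illustration (added for clarity, not in the original module): for the
# character list ["a", "b"] and n=2 this returns [{"a": 1, "b": 1}, {"ab": 1}],
# i.e. one dict of character n-gram counts per order from 1 to n.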
# pylint: disable=invalid-name
ChrF3 = ChrFEvaluator(beta=3)
| bsd-3-clause | 5,065,427,443,965,041,000 | 33.207921 | 77 | 0.529957 | false |
UMONS-GFA/bdas | doc/sensors/sim_pluvio.py | 1 | 18407 | __author__ = 'kaufmanno'
import numpy as np
from scipy.interpolate import pchip_interpolate, interp1d
import matplotlib.pyplot as plt
draw_graphs = True
#draw_graphs = False
load_calibration = True
save_calibration = False
calibration_file = 'calibration.txt'
single_flow = True # a varying flow otherwise a series of flows
# if not single_flow :
min_flow = 1.0 # l/h
max_flow = 10.0 # l/h
flow_step = 0.1 # l/h
def schmitt_trigger(ts, low, high, threshold):
filtered = []
fd = []
is_high = False
is_low = False
state = np.NaN
for i in ts:
d = 0
if i < low:
is_low = True
state = 0
elif i > high:
is_high = True
state = 1
if is_low and i > threshold:
is_low = False
state = 1
d = 1
elif is_high and i < threshold:
is_high = False
state = 0
d = 0
filtered.append(state)
fd.append(d)
return filtered, fd
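# Editor's note (added comment): schmitt_trigger is a software hysteresis filter.
# The state switches to 1 only after the input has dropped below `low` and then
# risen above `threshold` (that rising transition is flagged with d == 1), and it
# switches back to 0 only after having exceeded `high` and then fallen below
# `threshold`, which keeps noisy sensor frequencies from producing spurious counts.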
def comb_to_linapprox(comb):
sawtooth = np.zeros_like(comb, 'float64')
slope = np.zeros_like(comb, 'float64')
i = 0
start_tooth = i
while i < len(comb):
stop_tooth = i
if comb[i] == 0:
i += 1
else:
sawtooth[start_tooth:stop_tooth+1] = sawtooth[start_tooth:start_tooth+1]*np.ones(stop_tooth - start_tooth + 1) + np.linspace(0.0, 1.0, stop_tooth - start_tooth + 1)
slope[start_tooth:stop_tooth+1] = 1.0/(stop_tooth - start_tooth)
start_tooth = i
i += 1
return sawtooth, slope
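# Editor's note (added comment): comb_to_linapprox converts the sparse 0/1 event
# train into a piecewise-linear cumulative count: between two successive events the
# sawtooth ramps up by exactly 1, and `slope` holds the constant per-sample
# increment of that segment, so multiplying by the siphon volume later gives a
# cumulative volume and an instantaneous flow estimate.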
def get_inflow(t, inflow_mean, inflow_variation, inflow_var_period, inflow_var_phase, inflow_random, random=False):
if random:
inflow = inflow_mean + inflow_variation*np.sin(2*np.pi*t/inflow_var_period+inflow_var_phase) + np.random.normal(0.0, inflow_random, 1)[0]
else:
inflow = inflow_mean + inflow_variation*np.sin(2*np.pi*t/inflow_var_period+inflow_var_phase)
return inflow
if __name__ == '__main__':
inflow = []
estimated_inflow = []
if single_flow:
flow_range = [min_flow]
else:
flow_range = np.arange(min_flow, max_flow, flow_step)
for tk_inflow_mean in flow_range:
# General constants
g = 9810 # [mm/s²]
eps0 = 8.85E-12 # void electric permittivity
epsr_teflon = 2.1
# Tank parameters
tk_overflow_height = 3.1 # height above tube in tank [mm]
tk_tube_height = 4.05 # height of the tube above the bottom of the tank [mm]
tk_tube_diameter = 3.5 # hole diameter [mm]
tk_tank_diameter = 80 # tank diameter [mm]
# Siphon gauge parameters
sg_siphon_height = 70.4 # height between bottom and top of siphon [mm]
sg_tube_diameter = 80.0 # siphon gauge tank diameter [mm]
sg_siphon_diameter = 6.0 # siphon tube diameter [mm]
sg_siphon_length = 300.0 # siphon tube length for outflow [mm]
sg_desiphoning_level = 1.5 # water level at which siphon stops to be active when level drops in the gauge [mm]
sg_residual_water_height = 39.5 # height of residual water after siphoning [mm]
# Sensor parameters
ss_length = 150 # length of cylindrical capacitor [mm]
ss_always_wet_length = tk_tube_height + sg_residual_water_height # length of cylindrical capacitor that is always wet (at the base of the upper tank and the gauge below the siphon) [mm]
ss_inner_radius = 10 # inner radius of the cylinder [mm]
ss_outer_radius = 10.4 # outer radius of the cylinder [mm]
ss_resistance = 500000 # R2 [ohm]
# Data acquisition parameters
das_period = 2 # sampling period [s]
# Derived tank parameters
tk_tank_area = np.pi/4*tk_tank_diameter**2 - np.pi*ss_outer_radius**2 # tank area [mm²]
tk_hole_area = np.pi/4*tk_tube_diameter**2 # tank area [mm²]
# Derived siphon gauge parameters
sg_tube_area = np.pi/4*sg_tube_diameter**2 - np.pi*ss_outer_radius**2 # tank area [mm²]
# Tank starting state
tk_water_level = 4.05 # level of water in tank above the hole [mm]
if single_flow:
tk_inflow_mean = 4.0 # mean volumetric inflow [l/h]
tk_inflow_variation = 3.0 # amplitude of the inflow variation [l/h]
tk_inflow_var_period = 8100.0 # period of the inflow variation [s]
tk_inflow_random = 0.01 # amplitude of random component on inflow [l/h]
tk_inflow_var_phase = 0.0 # phase of the inflow variation [rad]
else:
tk_inflow_variation = 0.0 # amplitude of the inflow variation [l/h]
tk_inflow_var_period = 1.0 # period of the inflow variation [s]
tk_inflow_random = 0.0 # amplitude of random component on inflow [l/h]
tk_inflow_var_phase = 0.0 # phase of the inflow variation [rad]
tk_outflow = 0.0 # volumetric outflow [l/h]
# Siphon gauge starting state
sg_water_level = 1.5 # level of water in the siphon gauge tank above the base of the siphon [mm]
sg_outflow = 0.0 # volumetric outflow [l/h]
sg_active = 0 # 1 when siphon is active 0 otherwise
# Simulation time
time_start = 0.0 # simulation starting time
time_end = 36000.0 # simulation ending time
time_step = .2 # [s]
# Initialisation
time = time_start
tk_inflow = get_inflow(time, tk_inflow_mean, tk_inflow_variation, tk_inflow_var_period, tk_inflow_var_phase, tk_inflow_random, single_flow)
t = [time]
tk_h = [tk_water_level]
tk_o = [tk_outflow]
tk_i = [tk_inflow]
sg_h = [sg_water_level]
sg_o = [sg_outflow]
sg_a = [sg_active]
sg_total_outflow_volume = 0
ss_capacity = (ss_always_wet_length + sg_water_level + tk_water_level) * epsr_teflon / 500 * np.pi * eps0 / np.log(ss_outer_radius / ss_inner_radius)
ss_frequency = 1/(0.693*2*ss_resistance*ss_capacity)
ss_counter = [ss_frequency*time_step]
# Theoretical siphoning time [h]
ts0 = 0.54*(sg_tube_area/100.0)*sg_siphon_length**(4/7)*sg_siphon_height**(3/7)/sg_siphon_diameter**(19/7)
print('siphoning time without inflow : %4.1f s' % ts0)
# Theoretical siphoning rate [l/h]
sr = sg_tube_area*sg_siphon_height*3.6/1000/ts0
print('siphoning rate : %4.2f l/h' % sr)
# Theoretical siphoning time with inflow
ts = ts0/(1-tk_inflow_mean/sr)
print('siphoning time with inflow of %4.2f l/h : %4.1f s' % (tk_inflow_mean, ts))
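        # Editor's note (added comment): during siphoning the gauge drains at the net
        # rate (sr - inflow), so the drain time stretches by the factor sr / (sr - inflow),
        # which is the ts = ts0 / (1 - inflow/sr) expression used above.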
# sensor low and high frequencies
ss_min_capacity = ss_always_wet_length * epsr_teflon / 500 * np.pi * eps0 / np.log(ss_outer_radius / ss_inner_radius)
ss_max_freq = 1/(0.693*2*ss_resistance*ss_min_capacity)
ss_max_capacity = (ss_always_wet_length + sg_siphon_height + tk_overflow_height) * epsr_teflon / 500 * np.pi * eps0 / np.log(ss_outer_radius / ss_inner_radius)
ss_min_freq = 1/(0.693*2*ss_resistance*ss_max_capacity)
print('sensor frequency range [%5.0f Hz - %5.0f Hz]' % (ss_min_freq, ss_max_freq))
# Simulation
while time < time_end:
time += time_step
t.append(time)
# tk update
tk_net_input = time_step*(tk_inflow-tk_outflow)*1000/3.6 # net water input during time_step [mm³]
tk_water_level += tk_net_input/tk_tank_area
if tk_water_level > tk_overflow_height:
tk_water_level = tk_overflow_height
elif tk_water_level < 0.0:
tk_water_level = 0.0
tk_outflow = (2*g*tk_water_level)**(1/2)*tk_hole_area*3.6/1000 # [l/h]
tk_inflow = get_inflow(time, tk_inflow_mean, tk_inflow_variation, tk_inflow_var_period, tk_inflow_var_phase, tk_inflow_random, single_flow)
tk_h.append(tk_water_level)
tk_o.append(tk_outflow)
tk_i.append(tk_inflow)
# sg update
sg_net_input = time_step*(tk_outflow-sg_outflow)*1000/3.6 # net water input during time_step [mm³]
sg_water_level += sg_net_input/sg_tube_area
if sg_water_level > sg_siphon_height:
sg_active = 1
elif sg_water_level <= sg_desiphoning_level:
sg_active = 0
if sg_active == 1:
sg_outflow = np.pi/900*(sg_water_level/(0.000016*sg_siphon_length))**(4/7)*sg_siphon_diameter**(19/7) # [l/h]
else:
sg_outflow = 0.0
sg_total_outflow_volume += (sg_outflow/3600)*time_step # [l]
sg_h.append(sg_water_level)
sg_o.append(sg_outflow)
sg_a.append(sg_active)
# ss update
ss_counter.append(ss_frequency*time_step)
ss_capacity = (ss_always_wet_length + sg_water_level + tk_water_level) * epsr_teflon / 500 * np.pi * eps0 / np.log(ss_outer_radius / ss_inner_radius)
ss_frequency = 1/(0.693*2*ss_resistance*ss_capacity)
# # Simulation outputs
#print('Total outflow of gauge over %4.1f s : %4.3f l' % (time_end-time_start, sg_total_outflow_volume))
if draw_graphs:
sim_fig = plt.figure('Tank and siphon gauge')
# Tank
tk_ax1 = sim_fig.add_subplot(4, 1, 1)
tk_ax1.plot(t, tk_h, '-b')
tk_ax1.set_ylabel('level in \nupper tank [mm]')
tk_ax2 = sim_fig.add_subplot(4, 1, 2, sharex=tk_ax1)
tk_ax2.plot(t, tk_o, '-r')
tk_ax2.hold('on')
tk_ax2.plot(t, tk_i, '-g')
tk_ax2.set_ylabel('inflow in \nupper tank and\n outflow to \nsiphon gauge [l/h]')
# Siphon
tk_ax3 = sim_fig.add_subplot(4, 1, 3, sharex=tk_ax1)
tk_ax3.plot(t, sg_h, '-b')
tk_ax3.set_ylabel('level in \nsiphon gauge [mm]')
tk_ax4 = sim_fig.add_subplot(4, 1, 4, sharex=tk_ax1)
tk_ax4.plot(t, sg_o, '-g')
tk_ax4.hold('on')
tk_ax4.plot(t, sg_a, '-k')
tk_ax4.set_xlabel('time [s]')
tk_ax4.set_ylabel('outflow of \nsiphon gauge [l/h]')
# Data acquisition system output
das_fig = plt.figure('DAS acquisition')
das_ax1 = das_fig.add_subplot(5, 1, 1, sharex=tk_ax1)
das_ax1.plot(t, ss_counter, '-k')
das_ax1.set_ylabel('Sensor oscillations [-]')
# resample oscillations to compute DAS frequencies
das_t = []
das_frequencies = []
for i in range(0, len(ss_counter)-int(das_period / time_step), int(das_period / time_step)):
freq = 0
for j in range(0, int(das_period / time_step)):
freq += ss_counter[i+j]
das_t.append(time_start+(i+j)*time_step)
das_frequencies.append(freq/das_period)
x, das_siphoning = schmitt_trigger(das_frequencies, 5000, 7000, 9000)
das_sawtooth, das_slope = comb_to_linapprox(das_siphoning)
das_volume = das_sawtooth*sg_siphon_height*sg_tube_area/1000000
das_flow = das_slope *sg_siphon_height*sg_tube_area/1000000 / (das_period/3600)
if draw_graphs:
das_ax2 = das_fig.add_subplot(5, 1, 2, sharex=tk_ax1)
das_ax2.plot(das_t, das_frequencies, '-r')
das_ax2.set_ylabel('DAS Frequencies [Hz]')
das_ax3 = das_fig.add_subplot(5, 1, 3, sharex=tk_ax1)
das_ax3.plot(das_t, das_siphoning, '-k')
das_ax3.set_ylabel('Siphoning [0/1]')
das_ax4 = das_fig.add_subplot(5, 1, 4, sharex=tk_ax1)
das_ax4.plot(das_t, das_volume, '-r')
das_ax4.set_xlabel('time [s]')
das_ax4.set_ylabel('Volume [l]')
das_ax4.hold('on')
das_ax4.plot(t, np.cumsum(tk_o)/3600*time_step, '-g')
das_ax5 = das_fig.add_subplot(5, 1, 5, sharex=tk_ax1)
das_ax5.plot(das_t, das_flow, '-g')
das_ax5.set_xlabel('time [s]')
das_ax5.set_ylabel('Flow [l/h]')
plt.show()
print('Estimated total Volume : %d x %4.3f l = %4.3f l' %(np.sum(das_siphoning), sg_tube_area*sg_siphon_height/1000000, np.sum(das_siphoning)*sg_tube_area*sg_siphon_height/1000000))
print('________________________________________________')
inflow.append(tk_inflow_mean)
estimated_inflow.append(2*(das_volume[1349]-das_volume[449]))
flow_error = []
for i in range(0, len(inflow)):
flow_error.append(100*(inflow[i] - estimated_inflow[i])/estimated_inflow[i])
if not single_flow:
err_fig = plt.figure('errors')
flow_error = []
for i in range(0, len(inflow)):
flow_error.append(100*(inflow[i] - estimated_inflow[i])/estimated_inflow[i])
axes = err_fig.add_subplot(2, 1, 1)
axes.plot(estimated_inflow, inflow, '-b')
axes.set_xlabel('estimated inflow [l/h]')
axes.set_ylabel('real inflow [l/h]')
plt.xlim(0.0, 15.0)
plt.ylim(0.0, 15.0)
plt.grid(b=True, which='major', color='k', linestyle='-')
axes2 = err_fig.add_subplot(2, 1, 2, sharex=axes)
axes2.plot(estimated_inflow, flow_error, '-r')
axes2.set_xlabel('estimated inflow [l/h]')
axes2.set_ylabel('relative error [%]')
plt.xlim(0.0, 15.0)
plt.ylim(0.0, 50.0)
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.show()
calibration = []
for i in range(len(flow_error)):
calibration.append(str('\t'.join(list(map(str,[estimated_inflow[i],flow_error[i], '\n'])))))
if save_calibration:
with open(calibration_file,'w+') as cal_file:
cal_file.writelines(calibration)
if load_calibration:
with open(calibration_file,'r') as cal_file:
rows = [list(map(float, L.strip().split('\t'))) for L in cal_file]
cal_estimated_inflow, cal_flow_error = [], []
for i in range(len(rows)):
cal_estimated_inflow.append(rows[i][0])
cal_flow_error.append(rows[i][1])
cal_inflow, cal_error = [], []
for i in range(len(cal_estimated_inflow)-1):
tmp_inflow = np.linspace(cal_estimated_inflow[i], cal_estimated_inflow[i+1], 10)
tmp_error = np.linspace(cal_flow_error[i], cal_flow_error[i+1], 10)
for j in range(len(tmp_error)):
cal_inflow.append(tmp_inflow[j])
cal_error.append(tmp_error[j])
corr_flow = []
for i in range(len(das_flow)):
for j in range(len(cal_error)):
if round(das_flow[i], 1) == round(cal_inflow[j], 1):
corr = cal_error[j]
break
else:
corr = 0.0
corr_flow.append(das_flow[i]*(1.0 + corr/100))
# corr_fig = plt.figure('Corrections')
# das_ax1 = corr_fig.add_subplot(1, 1, 1)
# das_ax1.plot(t, tk_i, '-g', label='simulated inflow')
# das_ax1.plot(das_t, das_flow, '-b',label='retrieved inflow')
# das_ax1.plot(das_t, corr_flow, '-r',label='corrected retrieved inflow')
# das_ax1.set_xlabel('time [s]')
# das_ax1.set_ylabel('Flow [l/h]')
# plt.legend()
# plt.show()
# alternative flow computation
centered_times = []
centered_flow = []
siphoning_time = [das_t[i] for i in range(len(das_t)) if das_siphoning[i] == 1]
for i in range(len(siphoning_time)-1):
centered_times.append((siphoning_time[i+1]+siphoning_time[i])/2)
centered_flow.append(sg_tube_area*sg_siphon_height*3.6/1000/(siphoning_time[i+1]-siphoning_time[i])) # [l/h]
corr_centered_flow = []
for i in range(len(centered_flow)):
for j in range(len(cal_error)):
if round(centered_flow[i], 1) == round(cal_inflow[j], 1):
corr = cal_error[j]
break
else:
corr = 0.0
corr_centered_flow.append(centered_flow[i]*(1.0 + corr/100))
interpolate_corr_flow = interp1d(centered_times, corr_centered_flow,kind='cubic')
interpolate_flow = interp1d(centered_times, centered_flow,kind='cubic')
das_t_interpolation = np.array(das_t)[(np.array(das_t) > centered_times[0]) & (np.array(das_t)<centered_times[-1])]
interpolated_flow = interpolate_flow(das_t_interpolation)
interpolated_corr_flow = interpolate_corr_flow(das_t_interpolation)
pchip_interpolated_flow = pchip_interpolate(centered_times, corr_centered_flow,das_t_interpolation)
import matplotlib
matplotlib.rcParams.update({'font.size':15})
corr_fig = plt.figure('Poster')
# Siphon
tk_ax3 = corr_fig.add_subplot(3, 1, 1)
tk_ax3.plot(t, sg_h, '-b')
tk_ax3.set_ylabel('level in \nsiphon gauge [mm]')
tk_ax3.set_axis_bgcolor('0.95')
tk_ax3.grid(True)
# DAS frequencies
das_ax2 = corr_fig.add_subplot(3, 1, 2, sharex=tk_ax3)
das_ax2.plot(das_t, das_frequencies, '-r')
das_ax2.set_ylabel('DAS Frequencies [Hz]')
das_ax2.set_axis_bgcolor('0.95')
das_ax2.grid(True)
# Retrieved flows
das_ax1 = corr_fig.add_subplot(3, 1, 3, sharex=tk_ax3)
das_ax1.plot(t, tk_i, '-', color='grey', linewidth=3, label='simulated inflow')
#das_ax1.plot(das_t, das_flow, '-b',label='retrieved inflow')
#das_ax1.plot(das_t, corr_flow, '-r',label='corrected retrieved inflow')
das_ax1.plot(das_t_interpolation, interpolated_flow, '-r', linewidth=2, label='retrieved inflow')
das_ax1.plot(das_t_interpolation, interpolated_corr_flow, '-k', linewidth=2, label='corrected retrieved inflow')
#das_ax1.plot(das_t_interpolation, pchip_interpolated_flow, '-b', label='piecwise cubic interpolated retrieved inflow')
#das_ax1.plot(centered_times,centered_flow,'ok')
das_ax1.set_xlabel('time [s]')
das_ax1.set_ylabel('Flow [l/h]')
das_ax1.set_axis_bgcolor('0.95')
das_ax1.grid(True)
das_ax1.legend(loc='lower right', fontsize=15)
tk_ax3.set_xlim((0, 36000))
plt.show()
    corr_fig.savefig('/home/su530201/Images/Poster_GB2016.png', dpi=(600), bbox_inches='tight')
 | gpl-3.0 | 6,213,190,602,569,627,000 | 44.776119 | 194 | 0.574154 | false |
public-ink/public-ink | server/appengine-staging/lib/graphene/types/generic.py | 1 | 1233 | from __future__ import unicode_literals
from graphql.language.ast import (BooleanValue, FloatValue, IntValue,
StringValue, ListValue, ObjectValue)
from graphene.types.scalars import MIN_INT, MAX_INT
from .scalars import Scalar
class GenericScalar(Scalar):
"""
The `GenericScalar` scalar type represents a generic
GraphQL scalar value that could be:
String, Boolean, Int, Float, List or Object.
"""
@staticmethod
def identity(value):
return value
serialize = identity
parse_value = identity
@staticmethod
def parse_literal(ast):
if isinstance(ast, (StringValue, BooleanValue)):
return ast.value
elif isinstance(ast, IntValue):
num = int(ast.value)
if MIN_INT <= num <= MAX_INT:
return num
elif isinstance(ast, FloatValue):
return float(ast.value)
elif isinstance(ast, ListValue):
return [GenericScalar.parse_literal(value) for value in ast.values]
elif isinstance(ast, ObjectValue):
return {field.name.value: GenericScalar.parse_literal(field.value) for field in ast.fields}
else:
return None
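    # Editor's illustration (added comment, not in the original module): a literal
    # such as {count: 2, tags: ["a", "b"]} arrives as an ObjectValue and is turned
    # into the plain dict {"count": 2, "tags": ["a", "b"]}; AST nodes outside the
    # handled types fall through and yield None.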
| gpl-3.0 | 3,297,268,475,079,431,000 | 30.615385 | 103 | 0.631792 | false |
rlazojr/totalinstaller | plugin.program.community.builds/resetAddon.py | 1 | 1229 | #
# Copyright (C) 2014 Richard Dean
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmc
import xbmcgui
import xbmcaddon
import shutil
def resetAddon():
path = xbmc.translatePath('special://profile/addon_data/plugin.program.community.builds')
shutil.rmtree(path)
d = xbmcgui.Dialog()
d.ok('TR Community Builds', 'Community builds addon_data now removed.', 'Your locally stored builds will be unaffected but your', 'settings have now reset back to the defaults.')
if __name__ == '__main__':
resetAddon()
| gpl-2.0 | -6,033,804,512,929,722,000 | 34.114286 | 182 | 0.729048 | false |
melinath/django-graph-api | django_graph_api/graphql/introspection.py | 1 | 7781 | from django_graph_api.graphql.types import (
BooleanField,
CharField,
Enum,
ENUM,
EnumField,
INPUT_OBJECT,
INTERFACE,
List,
LIST,
ManyEnumField,
ManyRelatedField,
NON_NULL,
Object,
OBJECT,
RelatedField,
SCALAR,
UNION,
NonNull,
)
class DirectiveLocationEnum(Enum):
object_name = '__DirectiveLocation'
values = (
{
'name': 'QUERY',
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': 'MUTATION',
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': 'FIELD',
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': 'FRAGMENT_DEFINITION',
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': 'FRAGMENT_SPREAD',
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': 'INLINE_FRAGMENT',
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
)
class TypeKindEnum(Enum):
object_name = '__TypeKind'
values = (
{
'name': SCALAR,
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': OBJECT,
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': INTERFACE,
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': UNION,
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': ENUM,
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': INPUT_OBJECT,
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': LIST,
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
{
'name': NON_NULL,
'description': None,
'isDeprecated': False,
'deprecationReason': None,
},
)
class InputValue(Object):
object_name = '__InputValue'
name = CharField()
description = CharField()
type = RelatedField(lambda: Type)
defaultValue = CharField()
def get_name(self):
return self.data[0]
def get_type(self):
type_ = self.data[1]
if not type_.null:
return NonNull(type_)
elif isinstance(type_, List):
return type_
return type_.__class__
class Directive(Object):
object_name = '__Directive'
name = CharField()
description = CharField()
locations = ManyEnumField(DirectiveLocationEnum)
args = ManyRelatedField(InputValue)
class Field(Object):
# self.data will be an item from a declared fields dict
object_name = '__Field'
name = CharField()
description = CharField()
type = RelatedField(lambda: Type)
args = ManyRelatedField(InputValue)
isDeprecated = BooleanField()
deprecationReason = CharField()
def get_name(self):
return self.data[0]
def get_description(self):
return getattr(self.data[1], 'description', None)
def get_type(self):
field = self.data[1]
if isinstance(field, RelatedField):
type_ = field.object_type
if isinstance(field.type_, List):
type_ = List(type_)
if not field.null:
type_ = NonNull(type_)
elif not field.null:
type_ = NonNull(field.type_)
else:
type_ = field.type_
return type_
def get_args(self):
return tuple(self.data[1].arguments.items())
class EnumValue(Object):
object_name = '__EnumValue'
name = CharField()
description = CharField()
isDeprecated = BooleanField()
deprecationReason = CharField()
class Type(Object):
# self.data will be an object or scalar
object_name = '__Type'
kind = EnumField(TypeKindEnum)
name = CharField()
description = CharField()
fields = ManyRelatedField(Field)
inputFields = ManyRelatedField(InputValue)
interfaces = ManyRelatedField('self')
possibleTypes = ManyRelatedField('self')
enumValues = ManyRelatedField(EnumValue)
ofType = RelatedField('self')
def get_name(self):
if self.data.kind in [LIST, NON_NULL]:
return None
return self.data.object_name
def get_fields(self):
if self.data.kind != OBJECT:
return None
return sorted(
(
(name, field)
for name, field in self.data._declared_fields.items()
if name[:2] != '__'
),
key=lambda item: item[0],
)
def get_inputFields(self):
if self.data.kind != INPUT_OBJECT:
return None
return []
def get_interfaces(self):
if self.data.kind != OBJECT:
return None
return []
def get_possibleTypes(self):
return None
def get_enumValues(self):
if self.data.kind != ENUM:
return None
return self.data.values
def get_ofType(self):
if self.data.kind in [NON_NULL, LIST]:
type_ = self.data.type_
# Don't return NonNull if self is already NonNull
if self.data.kind is not NON_NULL and not getattr(type_, 'null', True):
return NonNull(type_)
return type_
return None
class Schema(Object):
# self.data will be the query_root.
object_name = '__Schema'
types = ManyRelatedField(Type)
queryType = RelatedField(Type)
mutationType = RelatedField(Type)
directives = ManyRelatedField(Directive)
def _collect_types(self, object_type, types=None):
if types is None:
types = set((object_type,))
for field in object_type._declared_fields.values():
if isinstance(field, RelatedField):
object_type = field.object_type
if object_type in types:
continue
types.add(object_type)
self._collect_types(object_type, types)
elif isinstance(field, EnumField):
enum_type = field.enum
if enum_type in types:
continue
types.add(enum_type)
elif isinstance(field.type_, List):
field = field.type_
elif field.type_:
types.add(field.type_)
return types
def _type_key(self, type_):
object_name = type_.object_name
# Sort: defined types, introspection types, scalars, and then by name.
return (
type_.kind == SCALAR,
object_name.startswith('__'),
object_name,
)
def get_types(self):
types = self._collect_types(self.data.query_root_class)
return sorted(types, key=self._type_key)
def get_queryType(self):
return self.data.query_root_class
def get_mutationType(self):
return None
def get_directives(self):
return []
| mit | -8,515,198,416,324,023,000 | 25.198653 | 83 | 0.528081 | false |
fsimkovic/conkit | conkit/core/struct.py | 1 | 2751 | # BSD 3-Clause License
#
# Copyright (c) 2016-18, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Internal classes required by ConKit defining some sort of internal structure"""
from __future__ import division
from __future__ import print_function
__author__ = "Felix Simkovic"
__date__ = "03 Aug 2016"
__version__ = "1.0"
class _Struct(object):
"""A basic class representing a struct residue"""
__slots__ = ('res_seq', 'res_altseq', 'res_name', 'res_chain')
def __repr__(self):
string = "{name}(res_seq='{res_seq}' res_altseq='{res_altseq}' res_name='{res_name}' res_chain='{res_chain}')"
return string.format(name=self.__class__.__name__, **{k: getattr(self, k) for k in self.__class__.__slots__})
class Gap(_Struct):
"""A basic class representing a gap residue"""
IDENTIFIER = -999999
def __init__(self):
self.res_seq = Gap.IDENTIFIER
self.res_altseq = Gap.IDENTIFIER
self.res_name = 'X'
self.res_chain = ''
class Residue(_Struct):
"""A basic class representing a residue"""
def __init__(self, res_seq, res_altseq, res_name, res_chain):
self.res_seq = res_seq
self.res_altseq = res_altseq
self.res_name = res_name
self.res_chain = res_chain
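# Example values (hypothetical, for illustration only): Residue(36, 36, 'ALA', 'A')
# describes alanine 36 in chain A, while Gap() is a placeholder residue whose
# res_seq and res_altseq are both Gap.IDENTIFIER (-999999) and whose res_name is 'X'.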
| bsd-3-clause | 3,594,218,753,008,442,400 | 39.455882 | 118 | 0.708833 | false |
bravomikekilo/mxconsole | mxconsole/platform/flags.py | 1 | 4765 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the flags interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse as _argparse
from mxconsole.util.all_util import remove_undocumented
_global_parser = _argparse.ArgumentParser()
# pylint: disable=invalid-name
class _FlagValues(object):
"""Global container and accessor for flags and their values."""
def __init__(self):
self.__dict__['__flags'] = {}
self.__dict__['__parsed'] = False
def _parse_flags(self, args=None):
result, unparsed = _global_parser.parse_known_args(args=args)
for flag_name, val in vars(result).items():
self.__dict__['__flags'][flag_name] = val
self.__dict__['__parsed'] = True
return unparsed
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
if name not in self.__dict__['__flags']:
raise AttributeError(name)
return self.__dict__['__flags'][name]
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
self.__dict__['__flags'][name] = value
def _define_helper(flag_name, default_value, docstring, flagtype):
"""Registers 'flag_name' with 'default_value' and 'docstring'."""
_global_parser.add_argument('--' + flag_name,
default=default_value,
help=docstring,
type=flagtype)
# Provides the global object that can be used to access flags.
FLAGS = _FlagValues()
def DEFINE_string(flag_name, default_value, docstring):
"""Defines a flag of type 'string'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a string.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, str)
def DEFINE_integer(flag_name, default_value, docstring):
"""Defines a flag of type 'int'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as an int.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, int)
def DEFINE_boolean(flag_name, default_value, docstring):
"""Defines a flag of type 'boolean'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a boolean.
docstring: A helpful message explaining the use of the flag.
"""
# Register a custom function for 'bool' so --flag=True works.
def str2bool(v):
return v.lower() in ('true', 't', '1')
_global_parser.add_argument('--' + flag_name,
nargs='?',
const=True,
help=docstring,
default=default_value,
type=str2bool)
# Add negated version, stay consistent with argparse with regard to
# dashes in flag names.
_global_parser.add_argument('--no' + flag_name,
action='store_false',
dest=flag_name.replace('-', '_'))
# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name
def DEFINE_float(flag_name, default_value, docstring):
"""Defines a flag of type 'float'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a float.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, float)
_allowed_symbols = [
# We rely on gflags documentation.
'DEFINE_bool',
'DEFINE_boolean',
'DEFINE_float',
'DEFINE_integer',
'DEFINE_string',
'FLAGS',
]
remove_undocumented(__name__, _allowed_symbols)
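# Minimal usage sketch (illustrative only; the flag names and default values below
# are invented for demonstration and are not part of this module):
if __name__ == '__main__':
  DEFINE_string('data_dir', '/tmp/data', 'Directory holding input data.')
  DEFINE_integer('batch_size', 32, 'Number of samples per batch.')
  DEFINE_bool('debug', False, 'Enable verbose output.')
  # Flags are parsed lazily on first attribute access (see _FlagValues.__getattr__).
  print(FLAGS.data_dir, FLAGS.batch_size, FLAGS.debug)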
| apache-2.0 | 3,211,316,646,001,435,600 | 32.321678 | 80 | 0.633998 | false |
bundlewrap/bundlewrap | bundlewrap/utils/ui.py | 1 | 14061 | from contextlib import contextmanager, suppress
from datetime import datetime
from functools import wraps
from os import _exit, environ, getpid, kill
from os.path import join
from select import select
from shutil import get_terminal_size
from signal import signal, SIG_DFL, SIGINT, SIGQUIT, SIGTERM
from subprocess import PIPE, Popen
import sys
import termios
from time import time
from threading import Event, Lock, Thread
from . import STDERR_WRITER, STDOUT_WRITER
from .table import render_table, ROW_SEPARATOR
from .text import (
HIDE_CURSOR,
SHOW_CURSOR,
ansi_clean,
blue,
bold,
format_duration,
mark_for_translation as _,
)
INFO_EVENT = Event()
QUIT_EVENT = Event()
SHUTDOWN_EVENT_HARD = Event()
SHUTDOWN_EVENT_SOFT = Event()
TTY = STDOUT_WRITER.isatty()
def add_debug_indicator(f):
@wraps(f)
def wrapped(self, msg, **kwargs):
return f(self, "[DEBUG] " + msg, **kwargs)
return wrapped
def add_debug_timestamp(f):
@wraps(f)
def wrapped(self, msg, **kwargs):
if self.debug_mode:
msg = datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + msg
return f(self, msg, **kwargs)
return wrapped
def capture_for_debug_logfile(f):
@wraps(f)
def wrapped(self, msg, **kwargs):
if self.debug_log_file and self._active:
with self.lock:
self.debug_log_file.write(
datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") +
ansi_clean(msg).rstrip("\n") + "\n"
)
return f(self, msg, **kwargs)
return wrapped
def clear_formatting(f):
"""
    Makes sure formatting from cut-off lines can't bleed into the next one
"""
@wraps(f)
def wrapped(self, msg, **kwargs):
if TTY and environ.get("BW_COLORS", "1") != "0":
msg = "\033[0m" + msg
return f(self, msg, **kwargs)
return wrapped
def sigint_handler(*args, **kwargs):
"""
This handler is kept short since it interrupts execution of the
main thread. It's safer to handle these events in their own thread
because the main thread might be holding the IO lock while it is
interrupted.
"""
if not SHUTDOWN_EVENT_SOFT.is_set():
SHUTDOWN_EVENT_SOFT.set()
else:
SHUTDOWN_EVENT_HARD.set()
def sigquit_handler(*args, **kwargs):
"""
This handler is kept short since it interrupts execution of the
main thread. It's safer to handle these events in their own thread
because the main thread might be holding the IO lock while it is
interrupted.
"""
INFO_EVENT.set()
def spinner():
while True:
for c in "⠁⠈⠐⠠⢀⡀⠄⠂":
yield c
def page_lines(lines):
"""
View the given list of Unicode lines in a pager (e.g. `less`).
"""
lines = list(lines)
line_width = max([len(ansi_clean(line)) for line in lines])
if (
TTY and (
line_width > get_terminal_size().columns or
len(lines) > get_terminal_size().lines
)
):
write_to_stream(STDOUT_WRITER, SHOW_CURSOR)
env = environ.copy()
env["LESS"] = env.get("LESS", "") + " -R"
pager = Popen(
[environ.get("PAGER", "/usr/bin/less")],
env=env,
stdin=PIPE,
)
with suppress(BrokenPipeError):
pager.stdin.write("\n".join(lines).encode('utf-8'))
pager.stdin.close()
pager.communicate()
write_to_stream(STDOUT_WRITER, HIDE_CURSOR)
else:
for line in lines:
io.stdout(line)
def write_to_stream(stream, msg):
with suppress(BrokenPipeError):
if TTY:
stream.write(msg)
else:
stream.write(ansi_clean(msg))
stream.flush()
class DrainableStdin:
def get_input(self):
while True:
if QUIT_EVENT.is_set():
return None
if select([sys.stdin], [], [], 0.1)[0]:
return sys.stdin.readline().strip()
def drain(self):
if sys.stdin.isatty():
termios.tcflush(sys.stdin, termios.TCIFLUSH)
class IOManager:
"""
Threadsafe singleton class that handles all IO.
"""
def __init__(self):
self._active = False
self.debug_log_file = None
self.debug_mode = False
self.jobs = []
self.lock = Lock()
self.progress = 0
self.progress_start = None
self.progress_total = 0
self._spinner = spinner()
self._last_spinner_character = next(self._spinner)
self._last_spinner_update = 0
self._signal_handler_thread = None
self._child_pids = []
self._status_line_present = False
self._waiting_for_input = False
def activate(self):
self._active = True
if 'BW_DEBUG_LOG_DIR' in environ:
self.debug_log_file = open(join(
environ['BW_DEBUG_LOG_DIR'],
"{}_{}.log".format(
datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
getpid(),
),
), 'a')
self._signal_handler_thread = Thread(
target=self._signal_handler_thread_body,
)
# daemon mode is required because we need to keep the thread
# around until the end of a soft shutdown to wait for a hard
# shutdown signal, but don't have a feasible way of stopping
# the thread once the soft shutdown has completed
self._signal_handler_thread.daemon = True
self._signal_handler_thread.start()
signal(SIGINT, sigint_handler)
signal(SIGQUIT, sigquit_handler)
if TTY:
write_to_stream(STDOUT_WRITER, HIDE_CURSOR)
def ask(self, question, default, epilogue=None, input_handler=DrainableStdin()):
assert self._active
answers = _("[Y/n]") if default else _("[y/N]")
question = question + " " + answers + " "
self._waiting_for_input = True
with self.lock:
if QUIT_EVENT.is_set():
sys.exit(0)
self._clear_last_job()
while True:
write_to_stream(STDOUT_WRITER, "\a" + question + SHOW_CURSOR)
input_handler.drain()
answer = input_handler.get_input()
if answer is None:
if epilogue:
write_to_stream(STDOUT_WRITER, "\n" + epilogue + "\n")
QUIT_EVENT.set()
sys.exit(0)
elif answer.lower() in (_("y"), _("yes")) or (
not answer and default
):
answer = True
break
elif answer.lower() in (_("n"), _("no")) or (
not answer and not default
):
answer = False
break
write_to_stream(
STDOUT_WRITER,
_("Please answer with 'y(es)' or 'n(o)'.\n"),
)
if epilogue:
write_to_stream(STDOUT_WRITER, epilogue + "\n")
write_to_stream(STDOUT_WRITER, HIDE_CURSOR)
self._waiting_for_input = False
return answer
def deactivate(self):
self._active = False
if TTY:
write_to_stream(STDOUT_WRITER, SHOW_CURSOR)
signal(SIGINT, SIG_DFL)
signal(SIGQUIT, SIG_DFL)
self._signal_handler_thread.join()
if self.debug_log_file:
self.debug_log_file.close()
@clear_formatting
@add_debug_indicator
@capture_for_debug_logfile
@add_debug_timestamp
def debug(self, msg, append_newline=True):
if self.debug_mode:
with self.lock:
self._write(msg, append_newline=append_newline)
def job_add(self, msg):
if not self._active:
return
with self.lock:
self._clear_last_job()
self.jobs.append(msg)
self._write_current_job()
def job_del(self, msg):
if not self._active:
return
with self.lock:
self._clear_last_job()
self.jobs.remove(msg)
self._write_current_job()
def progress_advance(self, increment=1):
with self.lock:
self.progress += increment
def progress_increase_total(self, increment=1):
with self.lock:
self.progress_total += increment
def progress_set_total(self, total):
self.progress = 0
self.progress_start = datetime.utcnow()
self.progress_total = total
def progress_show(self):
if INFO_EVENT.is_set():
INFO_EVENT.clear()
table = []
if self.jobs:
table.append([bold(_("Running jobs")), self.jobs[0].strip()])
for job in self.jobs[1:]:
table.append(["", job.strip()])
try:
progress = (self.progress / float(self.progress_total))
elapsed = datetime.utcnow() - self.progress_start
remaining = elapsed / progress - elapsed
except ZeroDivisionError:
pass
else:
if table:
table.append(ROW_SEPARATOR)
table.extend([
[bold(_("Progress")), "{:.1f}%".format(progress * 100)],
ROW_SEPARATOR,
[bold(_("Elapsed")), format_duration(elapsed)],
ROW_SEPARATOR,
[
bold(_("Remaining")),
_("{} (estimate based on progress)").format(format_duration(remaining))
],
])
output = blue("i") + "\n"
if table:
for line in render_table(table):
output += ("{x} {line}\n".format(x=blue("i"), line=line))
else:
output += _("{x} No progress info available at this time.\n").format(x=blue("i"))
io.stderr(output + blue("i"))
@clear_formatting
@capture_for_debug_logfile
@add_debug_timestamp
def stderr(self, msg, append_newline=True):
with self.lock:
self._write(msg, append_newline=append_newline, err=True)
@clear_formatting
@capture_for_debug_logfile
@add_debug_timestamp
def stdout(self, msg, append_newline=True):
with self.lock:
self._write(msg, append_newline=append_newline)
@contextmanager
def job(self, job_text):
self.job_add(job_text)
try:
yield
finally:
self.job_del(job_text)
def job_wrapper(self, job_text):
def outer_wrapper(wrapped_function):
@wraps(wrapped_function)
def inner_wrapper(*args, **kwargs):
with self.job(job_text.format(*args, **kwargs)):
return wrapped_function(*args, **kwargs)
return inner_wrapper
return outer_wrapper
def _clear_last_job(self):
if self._status_line_present and TTY:
write_to_stream(STDOUT_WRITER, "\r\033[K")
self._status_line_present = False
def _signal_handler_thread_body(self):
while self._active:
self.progress_show()
if not self._waiting_for_input: # do not block and ignore SIGINT while .ask()ing
with self.lock:
self._clear_last_job()
self._write_current_job()
if QUIT_EVENT.is_set():
if SHUTDOWN_EVENT_HARD.wait(0.1):
self.stderr(_("{x} {signal} cleanup interrupted, exiting...").format(
signal=bold(_("SIGINT")),
x=blue("i"),
))
for ssh_pid in self._child_pids:
self.debug(_("killing SSH session with PID {pid}").format(pid=ssh_pid))
with suppress(ProcessLookupError):
kill(ssh_pid, SIGTERM)
self._clear_last_job()
if TTY:
write_to_stream(STDOUT_WRITER, SHOW_CURSOR)
_exit(1)
else:
if SHUTDOWN_EVENT_SOFT.wait(0.1):
QUIT_EVENT.set()
self.stderr(_(
"{x} {signal} canceling pending tasks... "
"(hit CTRL+C again for immediate dirty exit)"
).format(
signal=bold(_("SIGINT")),
x=blue("i"),
))
def _spinner_character(self):
if time() - self._last_spinner_update > 0.2:
self._last_spinner_update = time()
self._last_spinner_character = next(self._spinner)
return self._last_spinner_character
def _write(self, msg, append_newline=True, err=False):
if not self._active:
return
self._clear_last_job()
if msg is not None:
if append_newline:
msg += "\n"
write_to_stream(STDERR_WRITER if err else STDOUT_WRITER, msg)
self._write_current_job()
def _write_current_job(self):
if self.jobs and TTY:
line = "{} ".format(blue(self._spinner_character()))
# must track line length manually as len() will count ANSI escape codes
visible_length = 2
try:
progress = (self.progress / float(self.progress_total))
except ZeroDivisionError:
pass
else:
progress_text = "{:.1f}% ".format(progress * 100)
line += bold(progress_text)
visible_length += len(progress_text)
line += self.jobs[-1][:get_terminal_size().columns - 1 - visible_length]
write_to_stream(STDOUT_WRITER, line)
self._status_line_present = True
io = IOManager()
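# Minimal usage sketch (hypothetical caller code, not part of bundlewrap itself),
# using the methods defined above:
#
#   io.activate()
#   try:
#       with io.job("  example job text..."):
#           io.stdout("some output")
#           io.progress_advance()
#   finally:
#       io.deactivate()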
| gpl-3.0 | -4,341,101,949,010,468,400 | 31.969484 | 98 | 0.527376 | false |
susahe/sis | sis/schedule/models.py | 1 | 1278 | from __future__ import unicode_literals
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
from django.db import models
from course.models import Activity,Course,CourseGroup
from datetime import datetime
# TheorySession model: stores theory (lecture) sessions.
# Each session belongs to a CourseGroup and an Activity and records whether the student was present.
class TheorySession(models.Model):
coursegroup = models.ForeignKey(CourseGroup)
name = models.CharField(max_length=120)
start_time = models.DateTimeField(default=datetime.now, blank=True)
end_time = models.DateTimeField(default=datetime.now, blank=True)
activity = models.ForeignKey(Activity)
is_present= models.BooleanField()
class LabSession(models.Model):
name = models.CharField(max_length=120)
start_time = models.DateTimeField(default=datetime.now, blank=True)
end_time = models.DateTimeField(default=datetime.now, blank=True)
activity = models.ForeignKey(Activity)
is_present= models.BooleanField()
class PracticalSession(models.Model):
name = models.CharField(max_length=120)
user = models.ForeignKey(User)
start_time = models.DateTimeField(default=datetime.now, blank=True)
end_time = models.DateTimeField(default=datetime.now, blank=True)
activity = models.ForeignKey(Activity)
is_present= models.BooleanField()
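# Example (hypothetical objects, for illustration only): recording attendance for a
# theory session could look like
#   TheorySession.objects.create(coursegroup=group, name='Week 1 lecture',
#                                activity=activity, is_present=True)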
| gpl-2.0 | 7,980,110,490,229,708,000 | 35.514286 | 68 | 0.79734 | false |
Cinntax/home-assistant | homeassistant/components/plex/media_player.py | 1 | 28269 | """Support to interface with the Plex API."""
from datetime import timedelta
import json
import logging
import plexapi.exceptions
import plexapi.playlist
import plexapi.playqueue
import requests.exceptions
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
DEVICE_DEFAULT_NAME,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.helpers.event import track_time_interval
from homeassistant.util import dt as dt_util
from .const import (
CONF_SERVER_IDENTIFIER,
DOMAIN as PLEX_DOMAIN,
NAME_FORMAT,
REFRESH_LISTENERS,
SERVERS,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Plex media_player platform.
Deprecated.
"""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Plex media_player from a config entry."""
def add_entities(entities, update_before_add=False):
"""Sync version of async add entities."""
hass.add_job(async_add_entities, entities, update_before_add)
hass.async_add_executor_job(_setup_platform, hass, config_entry, add_entities)
def _setup_platform(hass, config_entry, add_entities_callback):
"""Set up the Plex media_player platform."""
server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
plexserver = hass.data[PLEX_DOMAIN][SERVERS][server_id]
plex_clients = {}
plex_sessions = {}
hass.data[PLEX_DOMAIN][REFRESH_LISTENERS][server_id] = track_time_interval(
hass, lambda now: update_devices(), timedelta(seconds=10)
)
def update_devices():
"""Update the devices objects."""
try:
devices = plexserver.clients()
except plexapi.exceptions.BadRequest:
_LOGGER.exception("Error listing plex devices")
return
except requests.exceptions.RequestException as ex:
_LOGGER.warning(
"Could not connect to Plex server: %s (%s)",
plexserver.friendly_name,
ex,
)
return
new_plex_clients = []
available_client_ids = []
for device in devices:
# For now, let's allow all deviceClass types
if device.deviceClass in ["badClient"]:
continue
available_client_ids.append(device.machineIdentifier)
if device.machineIdentifier not in plex_clients:
new_client = PlexClient(
plexserver, device, None, plex_sessions, update_devices
)
plex_clients[device.machineIdentifier] = new_client
_LOGGER.debug("New device: %s", device.machineIdentifier)
new_plex_clients.append(new_client)
else:
_LOGGER.debug("Refreshing device: %s", device.machineIdentifier)
plex_clients[device.machineIdentifier].refresh(device, None)
# add devices with a session and no client (ex. PlexConnect Apple TV's)
try:
sessions = plexserver.sessions()
except plexapi.exceptions.BadRequest:
_LOGGER.exception("Error listing plex sessions")
return
except requests.exceptions.RequestException as ex:
_LOGGER.warning(
"Could not connect to Plex server: %s (%s)",
plexserver.friendly_name,
ex,
)
return
plex_sessions.clear()
for session in sessions:
for player in session.players:
plex_sessions[player.machineIdentifier] = session, player
for machine_identifier, (session, player) in plex_sessions.items():
if machine_identifier in available_client_ids:
# Avoid using session if already added as a device.
_LOGGER.debug("Skipping session, device exists: %s", machine_identifier)
continue
if (
machine_identifier not in plex_clients
and machine_identifier is not None
):
new_client = PlexClient(
plexserver, player, session, plex_sessions, update_devices
)
plex_clients[machine_identifier] = new_client
_LOGGER.debug("New session: %s", machine_identifier)
new_plex_clients.append(new_client)
else:
_LOGGER.debug("Refreshing session: %s", machine_identifier)
plex_clients[machine_identifier].refresh(None, session)
for client in plex_clients.values():
# force devices to idle that do not have a valid session
if client.session is None:
client.force_idle()
client.set_availability(
client.machine_identifier in available_client_ids
or client.machine_identifier in plex_sessions
)
if client not in new_plex_clients:
client.schedule_update_ha_state()
if new_plex_clients:
add_entities_callback(new_plex_clients)
class PlexClient(MediaPlayerDevice):
"""Representation of a Plex device."""
def __init__(self, plex_server, device, session, plex_sessions, update_devices):
"""Initialize the Plex device."""
self._app_name = ""
self._device = None
self._available = False
self._marked_unavailable = None
self._device_protocol_capabilities = None
self._is_player_active = False
self._is_player_available = False
self._player = None
self._machine_identifier = None
self._make = ""
self._name = None
self._player_state = "idle"
self._previous_volume_level = 1 # Used in fake muting
self._session = None
self._session_type = None
self._session_username = None
self._state = STATE_IDLE
self._volume_level = 1 # since we can't retrieve remotely
self._volume_muted = False # since we can't retrieve remotely
self.plex_server = plex_server
self.plex_sessions = plex_sessions
self.update_devices = update_devices
# General
self._media_content_id = None
self._media_content_rating = None
self._media_content_type = None
self._media_duration = None
self._media_image_url = None
self._media_title = None
self._media_position = None
self._media_position_updated_at = None
# Music
self._media_album_artist = None
self._media_album_name = None
self._media_artist = None
self._media_track = None
# TV Show
self._media_episode = None
self._media_season = None
self._media_series_title = None
self.refresh(device, session)
def _clear_media_details(self):
"""Set all Media Items to None."""
# General
self._media_content_id = None
self._media_content_rating = None
self._media_content_type = None
self._media_duration = None
self._media_image_url = None
self._media_title = None
# Music
self._media_album_artist = None
self._media_album_name = None
self._media_artist = None
self._media_track = None
# TV Show
self._media_episode = None
self._media_season = None
self._media_series_title = None
# Clear library Name
self._app_name = ""
def refresh(self, device, session):
"""Refresh key device data."""
self._clear_media_details()
if session: # Not being triggered by Chrome or FireTablet Plex App
self._session = session
if device:
self._device = device
try:
device_url = self._device.url("/")
except plexapi.exceptions.BadRequest:
device_url = "127.0.0.1"
if "127.0.0.1" in device_url:
self._device.proxyThroughServer()
self._session = None
self._machine_identifier = self._device.machineIdentifier
self._name = NAME_FORMAT.format(self._device.title or DEVICE_DEFAULT_NAME)
self._device_protocol_capabilities = self._device.protocolCapabilities
# set valid session, preferring device session
if self._device.machineIdentifier in self.plex_sessions:
self._session = self.plex_sessions.get(
self._device.machineIdentifier, [None, None]
)[0]
if self._session:
if (
self._device is not None
and self._device.machineIdentifier is not None
and self._session.players
):
self._is_player_available = True
self._player = [
p
for p in self._session.players
if p.machineIdentifier == self._device.machineIdentifier
][0]
self._name = NAME_FORMAT.format(self._player.title)
self._player_state = self._player.state
self._session_username = self._session.usernames[0]
self._make = self._player.device
else:
self._is_player_available = False
# Calculate throttled position for proper progress display.
position = int(self._session.viewOffset / 1000)
now = dt_util.utcnow()
if self._media_position is not None:
pos_diff = position - self._media_position
time_diff = now - self._media_position_updated_at
if pos_diff != 0 and abs(time_diff.total_seconds() - pos_diff) > 5:
self._media_position_updated_at = now
self._media_position = position
else:
self._media_position_updated_at = now
self._media_position = position
self._media_content_id = self._session.ratingKey
self._media_content_rating = getattr(self._session, "contentRating", None)
self._set_player_state()
if self._is_player_active and self._session is not None:
self._session_type = self._session.type
self._media_duration = int(self._session.duration / 1000)
# title (movie name, tv episode name, music song name)
self._media_title = self._session.title
# media type
self._set_media_type()
self._app_name = (
self._session.section().title
if self._session.section() is not None
else ""
)
self._set_media_image()
else:
self._session_type = None
def _set_media_image(self):
thumb_url = self._session.thumbUrl
if (
self.media_content_type is MEDIA_TYPE_TVSHOW
and not self.plex_server.use_episode_art
):
thumb_url = self._session.url(self._session.grandparentThumb)
if thumb_url is None:
_LOGGER.debug(
"Using media art because media thumb " "was not found: %s",
self.entity_id,
)
thumb_url = self.session.url(self._session.art)
self._media_image_url = thumb_url
def set_availability(self, available):
"""Set the device as available/unavailable noting time."""
if not available:
self._clear_media_details()
if self._marked_unavailable is None:
self._marked_unavailable = dt_util.utcnow()
else:
self._marked_unavailable = None
self._available = available
def _set_player_state(self):
if self._player_state == "playing":
self._is_player_active = True
self._state = STATE_PLAYING
elif self._player_state == "paused":
self._is_player_active = True
self._state = STATE_PAUSED
elif self.device:
self._is_player_active = False
self._state = STATE_IDLE
else:
self._is_player_active = False
self._state = STATE_OFF
def _set_media_type(self):
if self._session_type in ["clip", "episode"]:
self._media_content_type = MEDIA_TYPE_TVSHOW
# season number (00)
if callable(self._session.season):
self._media_season = str((self._session.season()).index).zfill(2)
elif self._session.parentIndex is not None:
self._media_season = self._session.parentIndex.zfill(2)
else:
self._media_season = None
# show name
self._media_series_title = self._session.grandparentTitle
# episode number (00)
if self._session.index is not None:
self._media_episode = str(self._session.index).zfill(2)
elif self._session_type == "movie":
self._media_content_type = MEDIA_TYPE_MOVIE
if self._session.year is not None and self._media_title is not None:
self._media_title += " (" + str(self._session.year) + ")"
elif self._session_type == "track":
self._media_content_type = MEDIA_TYPE_MUSIC
self._media_album_name = self._session.parentTitle
self._media_album_artist = self._session.grandparentTitle
self._media_track = self._session.index
self._media_artist = self._session.originalTitle
# use album artist if track artist is missing
if self._media_artist is None:
_LOGGER.debug(
"Using album artist because track artist " "was not found: %s",
self.entity_id,
)
self._media_artist = self._media_album_artist
def force_idle(self):
"""Force client to idle."""
self._state = STATE_IDLE
self._session = None
self._clear_media_details()
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
@property
def unique_id(self):
"""Return the id of this plex client."""
return self.machine_identifier
@property
def available(self):
"""Return the availability of the client."""
return self._available
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def machine_identifier(self):
"""Return the machine identifier of the device."""
return self._machine_identifier
@property
def app_name(self):
"""Return the library name of playing media."""
return self._app_name
@property
def device(self):
"""Return the device, if any."""
return self._device
@property
def marked_unavailable(self):
"""Return time device was marked unavailable."""
return self._marked_unavailable
@property
def session(self):
"""Return the session, if any."""
return self._session
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def _active_media_plexapi_type(self):
"""Get the active media type required by PlexAPI commands."""
if self.media_content_type is MEDIA_TYPE_MUSIC:
return "music"
return "video"
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self._media_content_id
@property
def media_content_type(self):
"""Return the content type of current playing media."""
if self._session_type == "clip":
_LOGGER.debug(
"Clip content type detected, " "compatibility may vary: %s",
self.entity_id,
)
return MEDIA_TYPE_TVSHOW
if self._session_type == "episode":
return MEDIA_TYPE_TVSHOW
if self._session_type == "movie":
return MEDIA_TYPE_MOVIE
if self._session_type == "track":
return MEDIA_TYPE_MUSIC
return None
@property
def media_artist(self):
"""Return the artist of current playing media, music track only."""
return self._media_artist
@property
def media_album_name(self):
"""Return the album name of current playing media, music track only."""
return self._media_album_name
@property
def media_album_artist(self):
"""Return the album artist of current playing media, music only."""
return self._media_album_artist
@property
def media_track(self):
"""Return the track number of current playing media, music only."""
return self._media_track
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self._media_duration
@property
def media_position(self):
"""Return the duration of current playing media in seconds."""
return self._media_position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._media_position_updated_at
@property
def media_image_url(self):
"""Return the image URL of current playing media."""
return self._media_image_url
@property
def media_title(self):
"""Return the title of current playing media."""
return self._media_title
@property
def media_season(self):
"""Return the season of current playing media (TV Show only)."""
return self._media_season
@property
def media_series_title(self):
"""Return the title of the series of current playing media."""
return self._media_series_title
@property
def media_episode(self):
"""Return the episode of current playing media (TV Show only)."""
return self._media_episode
@property
def make(self):
"""Return the make of the device (ex. SHIELD Android TV)."""
return self._make
@property
def supported_features(self):
"""Flag media player features that are supported."""
if not self._is_player_active:
return 0
# force show all controls
if self.plex_server.show_all_controls:
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_TURN_OFF
| SUPPORT_VOLUME_MUTE
)
# only show controls when we know what device is connecting
if not self._make:
return 0
# no mute support
if self.make.lower() == "shield android tv":
_LOGGER.debug(
"Shield Android TV client detected, disabling mute " "controls: %s",
self.entity_id,
)
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_TURN_OFF
)
# Only supports play,pause,stop (and off which really is stop)
if self.make.lower().startswith("tivo"):
_LOGGER.debug(
"Tivo client detected, only enabling pause, play, "
"stop, and off controls: %s",
self.entity_id,
)
return SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP | SUPPORT_TURN_OFF
# Not all devices support playback functionality
# Playback includes volume, stop/play/pause, etc.
if self.device and "playback" in self._device_protocol_capabilities:
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_TURN_OFF
| SUPPORT_VOLUME_MUTE
)
return 0
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.setVolume(int(volume * 100), self._active_media_plexapi_type)
self._volume_level = volume # store since we can't retrieve
self.update_devices()
@property
def volume_level(self):
"""Return the volume level of the client (0..1)."""
if (
self._is_player_active
and self.device
and "playback" in self._device_protocol_capabilities
):
return self._volume_level
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
if self._is_player_active and self.device:
return self._volume_muted
def mute_volume(self, mute):
"""Mute the volume.
Since we can't actually mute, we'll:
- On mute, store volume and set volume to 0
- On unmute, set volume to previously stored volume
"""
if not (self.device and "playback" in self._device_protocol_capabilities):
return
self._volume_muted = mute
if mute:
self._previous_volume_level = self._volume_level
self.set_volume_level(0)
else:
self.set_volume_level(self._previous_volume_level)
def media_play(self):
"""Send play command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.play(self._active_media_plexapi_type)
self.update_devices()
def media_pause(self):
"""Send pause command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.pause(self._active_media_plexapi_type)
self.update_devices()
def media_stop(self):
"""Send stop command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.stop(self._active_media_plexapi_type)
self.update_devices()
def turn_off(self):
"""Turn the client off."""
# Fake it since we can't turn the client off
self.media_stop()
def media_next_track(self):
"""Send next track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipNext(self._active_media_plexapi_type)
self.update_devices()
def media_previous_track(self):
"""Send previous track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipPrevious(self._active_media_plexapi_type)
self.update_devices()
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if not (self.device and "playback" in self._device_protocol_capabilities):
return
src = json.loads(media_id)
media = None
if media_type == "MUSIC":
media = (
self.device.server.library.section(src["library_name"])
.get(src["artist_name"])
.album(src["album_name"])
.get(src["track_name"])
)
elif media_type == "EPISODE":
media = self._get_tv_media(
src["library_name"],
src["show_name"],
src["season_number"],
src["episode_number"],
)
elif media_type == "PLAYLIST":
media = self.device.server.playlist(src["playlist_name"])
elif media_type == "VIDEO":
media = self.device.server.library.section(src["library_name"]).get(
src["video_name"]
)
if (
media
and media_type == "EPISODE"
and isinstance(media, plexapi.playlist.Playlist)
):
# delete episode playlist after being loaded into a play queue
self._client_play_media(media=media, delete=True, shuffle=src["shuffle"])
elif media:
self._client_play_media(media=media, shuffle=src["shuffle"])
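    # Example payload (hypothetical values) for the play_media handler above: the
    # media_id argument is a JSON-encoded dict whose keys depend on media_type,
    # e.g. for media_type == "EPISODE":
    #   json.dumps({"library_name": "TV Shows", "show_name": "Example Show",
    #               "season_number": 1, "episode_number": 3, "shuffle": 0})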
def _get_tv_media(self, library_name, show_name, season_number, episode_number):
"""Find TV media and return a Plex media object."""
target_season = None
target_episode = None
show = self.device.server.library.section(library_name).get(show_name)
if not season_number:
playlist_name = f"{self.entity_id} - {show_name} Episodes"
return self.device.server.createPlaylist(playlist_name, show.episodes())
for season in show.seasons():
if int(season.seasonNumber) == int(season_number):
target_season = season
break
if target_season is None:
_LOGGER.error(
"Season not found: %s\\%s - S%sE%s",
library_name,
show_name,
str(season_number).zfill(2),
str(episode_number).zfill(2),
)
else:
if not episode_number:
playlist_name = "{} - {} Season {} Episodes".format(
self.entity_id, show_name, str(season_number)
)
return self.device.server.createPlaylist(
playlist_name, target_season.episodes()
)
for episode in target_season.episodes():
if int(episode.index) == int(episode_number):
target_episode = episode
break
if target_episode is None:
_LOGGER.error(
"Episode not found: %s\\%s - S%sE%s",
library_name,
show_name,
str(season_number).zfill(2),
str(episode_number).zfill(2),
)
return target_episode
def _client_play_media(self, media, delete=False, **params):
"""Instruct Plex client to play a piece of media."""
if not (self.device and "playback" in self._device_protocol_capabilities):
_LOGGER.error("Client cannot play media: %s", self.entity_id)
return
playqueue = plexapi.playqueue.PlayQueue.create(
self.device.server, media, **params
)
# Delete dynamic playlists used to build playqueue (ex. play tv season)
if delete:
media.delete()
server_url = self.device.server.baseurl.split(":")
self.device.sendCommand(
"playback/playMedia",
**dict(
{
"machineIdentifier": self.device.server.machineIdentifier,
"address": server_url[1].strip("/"),
"port": server_url[-1],
"key": media.key,
"containerKey": "/playQueues/{}?window=100&own=1".format(
playqueue.playQueueID
),
},
**params,
),
)
self.update_devices()
@property
def device_state_attributes(self):
"""Return the scene state attributes."""
attr = {
"media_content_rating": self._media_content_rating,
"session_username": self._session_username,
"media_library_name": self._app_name,
}
return attr
| apache-2.0 | 1,241,736,605,791,758,600 | 33.986386 | 88 | 0.564894 | false |
kwikteam/global_superclustering | global_code/emlaunch_synthetic_test_get_initclust.py | 1 | 1230 | #Be sure to run
#python setup.py build_ext --inplace
#before running this script
import pickle
import numpy as np
import matplotlib.pyplot as plt
import sorting
from supercluster import *
from klustakwik2 import *
import imp # lets you reload modules using e.g. imp.reload(sorting)
from IPython import embed
import time
from emcat import KK
from default_parameters import default_parameters
import testing_cat as tc
script_params = default_parameters.copy()
#script_params.update(
# run_monitoring_server=False,
# debug=True,
# )
personal_homedir = '/Users/shabnamkadir/clustering/'
picklefile = personal_homedir + 'global_superclustering/global_code/synthetic_cat.p'
pkl_file = open(picklefile,'rb')
mixture = pickle.load(pkl_file)
pkl_file.close()
#embed()
mixture_dict = mixture[0]
num_starting_clusters = 4 #produces an initial random clustering with 4 starting clusters.
num_spikes = mixture_dict['superclusters'].shape[0]
initclust = tc.generate_random_initial_clustering(num_starting_clusters, num_spikes )
saveinit = True
if saveinit:
with open('init_synthetic_cat_%g.p'%(num_starting_clusters), 'wb') as g:
pickle.dump(initclust, g)
exit()
#superdata used to be called silly
| gpl-2.0 | 7,085,543,806,165,810,000 | 28.285714 | 90 | 0.749593 | false |
djaodjin/djaodjin-pages | pages/api/sources.py | 1 | 5950 | # Copyright (c) 2021, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint: disable=no-member
import logging, os, tempfile
from django.template import TemplateSyntaxError
from django.template.loader import _engine_list
from django.utils._os import safe_join
from rest_framework import status, generics, serializers
from rest_framework.response import Response
from ..mixins import ThemePackageMixin
from ..themes import check_template, get_theme_dir, get_template_path
LOGGER = logging.getLogger(__name__)
def write_template(template_path, template_source):
check_template(template_source)
base_dir = os.path.dirname(template_path)
if not os.path.isdir(base_dir):
os.makedirs(base_dir)
temp_file = tempfile.NamedTemporaryFile(
mode='w+t', dir=base_dir, delete=False)
temp_file.write(template_source)
temp_file.close()
os.rename(temp_file.name, template_path)
LOGGER.info("pid %d wrote to %s", os.getpid(), template_path)
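# Illustrative call (hypothetical path): write_template() writes the body to a
# NamedTemporaryFile in the target directory and then os.rename()s it over the
# destination, so readers never observe a partially written template:
#   write_template('/themes/acme/templates/index.html',
#                  '{% extends "base.html" %}')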
class SourceCodeSerializer(serializers.Serializer):
path = serializers.CharField(required=False, max_length=255)
text = serializers.CharField(required=False, max_length=100000)
def update(self, instance, validated_data):
pass
def create(self, validated_data):
pass
class SourceDetailAPIView(ThemePackageMixin, generics.RetrieveUpdateAPIView,
generics.CreateAPIView):
"""
Retrieves a template source file
**Examples
.. code-block:: http
GET /api/themes/sources/index.html HTTP/1.1
responds
.. code-block:: json
{
"text": "..."
}
"""
serializer_class = SourceCodeSerializer
def post(self, request, *args, **kwargs):
"""
Creates a template source file
**Examples
.. code-block:: http
POST /api/themes/sources/index.html HTTP/1.1
.. code-block:: json
{
"text": "..."
}
responds
.. code-block:: json
{
"text": "..."
}
"""
#pylint:disable=useless-super-delegation
return super(SourceDetailAPIView, self).post(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
"""
Updates a template source file
**Examples
.. code-block:: http
PUT /api/themes/sources/index.html HTTP/1.1
.. code-block:: json
{
"text": "..."
}
responds
.. code-block:: json
{
"text": "..."
}
"""
#pylint:disable=useless-super-delegation
return super(SourceDetailAPIView, self).put(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
relative_path = self.kwargs.get('page')
with open(get_template_path(
relative_path=relative_path)) as source_file:
source_content = source_file.read()
return Response({'path': relative_path, 'text': source_content})
def update(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
relative_path = self.kwargs.get('page')
template_path = get_template_path(relative_path=relative_path)
theme_base = get_theme_dir(self.theme)
if not template_path.startswith(theme_base):
resp_status = status.HTTP_201_CREATED
template_path = safe_join(theme_base, 'templates', relative_path)
else:
resp_status = status.HTTP_200_OK
# We only write the file if the template syntax is correct.
try:
write_template(template_path, serializer.validated_data['text'])
# clear template loaders caches
engines = _engine_list(using=None)
for engine in engines:
try:
engine.env.cache.clear()
except AttributeError:
pass
except TemplateSyntaxError as err:
LOGGER.debug("%s", err, extra={'request': request})
return self.retrieve(request, *args, **kwargs)
return Response(serializer.data, status=resp_status)
def perform_create(self, serializer): #pylint:disable=unused-argument
relative_path = self.kwargs.get('page')
theme_base = get_theme_dir(self.theme)
template_path = safe_join(theme_base, 'templates', relative_path)
write_template(template_path, '''{% extends "base.html" %}
{% block content %}
<h1>Lorem Ipsum</h1>
{% endblock %}
''')
| bsd-2-clause | 5,008,858,569,694,676,000 | 30.989247 | 78 | 0.64084 | false |
Zlash65/erpnext | erpnext/assets/doctype/asset/asset.py | 1 | 23671 | # -*- coding: utf-8 -*-
# Copyright (c) 2016, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext, math, json
from frappe import _
from six import string_types
from frappe.utils import flt, add_months, cint, nowdate, getdate, today, date_diff
from frappe.model.document import Document
from erpnext.assets.doctype.asset_category.asset_category import get_asset_category_account
from erpnext.assets.doctype.asset.depreciation \
import get_disposal_account_and_cost_center, get_depreciation_accounts
from erpnext.accounts.general_ledger import make_gl_entries, delete_gl_entries
from erpnext.accounts.utils import get_account_currency
from erpnext.controllers.accounts_controller import AccountsController
class Asset(AccountsController):
def validate(self):
self.validate_asset_values()
self.validate_item()
self.set_missing_values()
if self.calculate_depreciation:
self.set_depreciation_rate()
self.make_depreciation_schedule()
self.set_accumulated_depreciation()
else:
self.finance_books = []
if self.get("schedules"):
self.validate_expected_value_after_useful_life()
self.status = self.get_status()
def on_submit(self):
self.validate_in_use_date()
self.set_status()
self.update_stock_movement()
if not self.booked_fixed_asset and not is_cwip_accounting_disabled():
self.make_gl_entries()
def on_cancel(self):
self.validate_cancellation()
self.delete_depreciation_entries()
self.set_status()
delete_gl_entries(voucher_type='Asset', voucher_no=self.name)
self.db_set('booked_fixed_asset', 0)
def validate_item(self):
item = frappe.get_cached_value("Item", self.item_code,
["is_fixed_asset", "is_stock_item", "disabled"], as_dict=1)
if not item:
frappe.throw(_("Item {0} does not exist").format(self.item_code))
elif item.disabled:
frappe.throw(_("Item {0} has been disabled").format(self.item_code))
elif not item.is_fixed_asset:
frappe.throw(_("Item {0} must be a Fixed Asset Item").format(self.item_code))
elif item.is_stock_item:
frappe.throw(_("Item {0} must be a non-stock item").format(self.item_code))
def validate_in_use_date(self):
if not self.available_for_use_date:
frappe.throw(_("Available for use date is required"))
def set_missing_values(self):
if not self.asset_category:
self.asset_category = frappe.get_cached_value("Item", self.item_code, "asset_category")
if self.item_code and not self.get('finance_books'):
finance_books = get_item_details(self.item_code, self.asset_category)
self.set('finance_books', finance_books)
def validate_asset_values(self):
if not flt(self.gross_purchase_amount):
frappe.throw(_("Gross Purchase Amount is mandatory"), frappe.MandatoryError)
if not is_cwip_accounting_disabled():
if not self.is_existing_asset and not (self.purchase_receipt or self.purchase_invoice):
frappe.throw(_("Please create purchase receipt or purchase invoice for the item {0}").
format(self.item_code))
if (not self.purchase_receipt and self.purchase_invoice
and not frappe.db.get_value('Purchase Invoice', self.purchase_invoice, 'update_stock')):
frappe.throw(_("Update stock must be enable for the purchase invoice {0}").
format(self.purchase_invoice))
if not self.calculate_depreciation:
return
elif not self.finance_books:
frappe.throw(_("Enter depreciation details"))
if self.is_existing_asset:
return
docname = self.purchase_receipt or self.purchase_invoice
if docname:
doctype = 'Purchase Receipt' if self.purchase_receipt else 'Purchase Invoice'
date = frappe.db.get_value(doctype, docname, 'posting_date')
if self.available_for_use_date and getdate(self.available_for_use_date) < getdate(self.purchase_date):
frappe.throw(_("Available-for-use Date should be after purchase date"))
def set_depreciation_rate(self):
for d in self.get("finance_books"):
d.rate_of_depreciation = self.get_depreciation_rate(d, on_validate=True)
def make_depreciation_schedule(self):
depreciation_method = [d.depreciation_method for d in self.finance_books]
if 'Manual' not in depreciation_method:
self.schedules = []
if not self.get("schedules") and self.available_for_use_date:
total_depreciations = sum([d.total_number_of_depreciations for d in self.get('finance_books')])
for d in self.get('finance_books'):
self.validate_asset_finance_books(d)
value_after_depreciation = (flt(self.gross_purchase_amount) -
flt(self.opening_accumulated_depreciation))
d.value_after_depreciation = value_after_depreciation
no_of_depreciations = cint(d.total_number_of_depreciations - 1) - cint(self.number_of_depreciations_booked)
end_date = add_months(d.depreciation_start_date,
no_of_depreciations * cint(d.frequency_of_depreciation))
total_days = date_diff(end_date, self.available_for_use_date)
rate_per_day = (value_after_depreciation - d.get("expected_value_after_useful_life")) / total_days
number_of_pending_depreciations = cint(d.total_number_of_depreciations) - \
cint(self.number_of_depreciations_booked)
from_date = self.available_for_use_date
if number_of_pending_depreciations:
next_depr_date = getdate(add_months(self.available_for_use_date,
number_of_pending_depreciations * 12))
if (cint(frappe.db.get_value("Asset Settings", None, "schedule_based_on_fiscal_year")) == 1
and getdate(d.depreciation_start_date) < next_depr_date):
number_of_pending_depreciations += 1
for n in range(number_of_pending_depreciations):
if n == list(range(number_of_pending_depreciations))[-1]:
schedule_date = add_months(self.available_for_use_date, n * 12)
previous_scheduled_date = add_months(d.depreciation_start_date, (n-1) * 12)
depreciation_amount = \
self.get_depreciation_amount_prorata_temporis(value_after_depreciation,
d, previous_scheduled_date, schedule_date)
elif n == list(range(number_of_pending_depreciations))[0]:
schedule_date = d.depreciation_start_date
depreciation_amount = \
self.get_depreciation_amount_prorata_temporis(value_after_depreciation,
d, self.available_for_use_date, schedule_date)
else:
schedule_date = add_months(d.depreciation_start_date, n * 12)
depreciation_amount = \
self.get_depreciation_amount_prorata_temporis(value_after_depreciation, d)
if value_after_depreciation != 0:
value_after_depreciation -= flt(depreciation_amount)
self.append("schedules", {
"schedule_date": schedule_date,
"depreciation_amount": depreciation_amount,
"depreciation_method": d.depreciation_method,
"finance_book": d.finance_book,
"finance_book_id": d.idx
})
else:
for n in range(number_of_pending_depreciations):
schedule_date = add_months(d.depreciation_start_date,
n * cint(d.frequency_of_depreciation))
if d.depreciation_method in ("Straight Line", "Manual"):
days = date_diff(schedule_date, from_date)
if n == 0: days += 1
depreciation_amount = days * rate_per_day
from_date = schedule_date
else:
depreciation_amount = self.get_depreciation_amount(value_after_depreciation,
d.total_number_of_depreciations, d)
if depreciation_amount:
value_after_depreciation -= flt(depreciation_amount)
self.append("schedules", {
"schedule_date": schedule_date,
"depreciation_amount": depreciation_amount,
"depreciation_method": d.depreciation_method,
"finance_book": d.finance_book,
"finance_book_id": d.idx
})
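# Illustrative example of the schedule built above (added for clarity; the numbers
# are assumptions, not values from this codebase): a Straight Line asset with
# gross_purchase_amount = 100000, expected_value_after_useful_life = 10000,
# total_number_of_depreciations = 5 and frequency_of_depreciation = 12 months gets
# five yearly schedule rows of roughly 18000 each, with the exact amounts pro-rated
# by the number of days between available_for_use_date and each schedule_date.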
def validate_asset_finance_books(self, row):
if flt(row.expected_value_after_useful_life) >= flt(self.gross_purchase_amount):
frappe.throw(_("Row {0}: Expected Value After Useful Life must be less than Gross Purchase Amount")
.format(row.idx))
if not row.depreciation_start_date:
frappe.throw(_("Row {0}: Depreciation Start Date is required").format(row.idx))
if not self.is_existing_asset:
self.opening_accumulated_depreciation = 0
self.number_of_depreciations_booked = 0
else:
depreciable_amount = flt(self.gross_purchase_amount) - flt(row.expected_value_after_useful_life)
if flt(self.opening_accumulated_depreciation) > depreciable_amount:
frappe.throw(_("Opening Accumulated Depreciation must be less than equal to {0}")
.format(depreciable_amount))
if self.opening_accumulated_depreciation:
if not self.number_of_depreciations_booked:
frappe.throw(_("Please set Number of Depreciations Booked"))
else:
self.number_of_depreciations_booked = 0
if cint(self.number_of_depreciations_booked) > cint(row.total_number_of_depreciations):
frappe.throw(_("Number of Depreciations Booked cannot be greater than Total Number of Depreciations"))
if row.depreciation_start_date and getdate(row.depreciation_start_date) < getdate(nowdate()):
frappe.msgprint(_("Depreciation Row {0}: Depreciation Start Date is entered as past date")
.format(row.idx), title=_('Warning'), indicator='red')
if row.depreciation_start_date and getdate(row.depreciation_start_date) < getdate(self.purchase_date):
frappe.throw(_("Depreciation Row {0}: Next Depreciation Date cannot be before Purchase Date")
.format(row.idx))
if row.depreciation_start_date and getdate(row.depreciation_start_date) < getdate(self.available_for_use_date):
frappe.throw(_("Depreciation Row {0}: Next Depreciation Date cannot be before Available-for-use Date")
.format(row.idx))
def set_accumulated_depreciation(self, ignore_booked_entry = False):
straight_line_idx = [d.idx for d in self.get("schedules") if d.depreciation_method == 'Straight Line']
finance_books = []
for i, d in enumerate(self.get("schedules")):
if ignore_booked_entry and d.journal_entry:
continue
if d.finance_book_id not in finance_books:
accumulated_depreciation = flt(self.opening_accumulated_depreciation)
value_after_depreciation = flt(self.get_value_after_depreciation(d.finance_book_id))
finance_books.append(d.finance_book_id)
depreciation_amount = flt(d.depreciation_amount, d.precision("depreciation_amount"))
value_after_depreciation -= flt(depreciation_amount)
if straight_line_idx and i == max(straight_line_idx) - 1:
book = self.get('finance_books')[cint(d.finance_book_id) - 1]
depreciation_amount += flt(value_after_depreciation -
flt(book.expected_value_after_useful_life), d.precision("depreciation_amount"))
d.depreciation_amount = depreciation_amount
accumulated_depreciation += d.depreciation_amount
d.accumulated_depreciation_amount = flt(accumulated_depreciation,
d.precision("accumulated_depreciation_amount"))
def get_value_after_depreciation(self, idx):
return flt(self.get('finance_books')[cint(idx)-1].value_after_depreciation)
def get_depreciation_amount(self, depreciable_value, total_number_of_depreciations, row):
if row.depreciation_method in ["Straight Line", "Manual"]:
amt = (flt(self.gross_purchase_amount) - flt(row.expected_value_after_useful_life) -
flt(self.opening_accumulated_depreciation))
depreciation_amount = amt * row.rate_of_depreciation
else:
depreciation_amount = flt(depreciable_value) * (flt(row.rate_of_depreciation) / 100)
value_after_depreciation = flt(depreciable_value) - depreciation_amount
if value_after_depreciation < flt(row.expected_value_after_useful_life):
depreciation_amount = flt(depreciable_value) - flt(row.expected_value_after_useful_life)
return depreciation_amount
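# Worked example for the method above (added; the figures are assumptions):
# with gross_purchase_amount = 100000, expected_value_after_useful_life = 10000
# and opening_accumulated_depreciation = 0:
#   - Straight Line / Manual, rate_of_depreciation = 0.2 (1/5 periods):
#       depreciation_amount = (100000 - 10000 - 0) * 0.2 = 18000 per period
#   - Written Down Value, rate_of_depreciation = 37 (percent):
#       depreciation_amount = 100000 * 37 / 100 = 37000 in the first period,
#       capped so the written-down value never drops below 10000.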
def get_depreciation_amount_prorata_temporis(self, depreciable_value, row, start_date=None, end_date=None):
if start_date and end_date:
prorata_temporis = min(abs(flt(date_diff(str(end_date), str(start_date)))) / flt(frappe.db.get_value("Asset Settings", None, "number_of_days_in_fiscal_year")), 1)
else:
prorata_temporis = 1
if row.depreciation_method in ("Straight Line", "Manual"):
depreciation_amount = (flt(row.value_after_depreciation) -
flt(row.expected_value_after_useful_life)) / (cint(row.total_number_of_depreciations) -
cint(self.number_of_depreciations_booked)) * prorata_temporis
else:
depreciation_amount = self.get_depreciation_amount(depreciable_value, row.total_number_of_depreciations, row)
return depreciation_amount
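# Worked example for the pro-rata computation above (added; dates and settings are
# assumptions): with 90000 left to depreciate over 5 periods (18000 per full
# period), an asset available from 2019-10-01 and a first depreciation date of
# 2019-12-31, prorata_temporis = 91 / 360 (taking number_of_days_in_fiscal_year
# = 360), so the first depreciation amount is 18000 * 91 / 360 = 4550.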
def validate_expected_value_after_useful_life(self):
for row in self.get('finance_books'):
accumulated_depreciation_after_full_schedule = [d.accumulated_depreciation_amount
for d in self.get("schedules") if cint(d.finance_book_id) == row.idx]
if accumulated_depreciation_after_full_schedule:
accumulated_depreciation_after_full_schedule = max(accumulated_depreciation_after_full_schedule)
asset_value_after_full_schedule = flt(flt(self.gross_purchase_amount) -
flt(accumulated_depreciation_after_full_schedule),
self.precision('gross_purchase_amount'))
if row.expected_value_after_useful_life < asset_value_after_full_schedule:
frappe.throw(_("Depreciation Row {0}: Expected value after useful life must be greater than or equal to {1}")
.format(row.idx, asset_value_after_full_schedule))
def validate_cancellation(self):
if self.status not in ("Submitted", "Partially Depreciated", "Fully Depreciated"):
frappe.throw(_("Asset cannot be cancelled, as it is already {0}").format(self.status))
if self.purchase_invoice:
frappe.throw(_("Please cancel Purchase Invoice {0} first").format(self.purchase_invoice))
if self.purchase_receipt:
frappe.throw(_("Please cancel Purchase Receipt {0} first").format(self.purchase_receipt))
def delete_depreciation_entries(self):
for d in self.get("schedules"):
if d.journal_entry:
frappe.get_doc("Journal Entry", d.journal_entry).cancel()
d.db_set("journal_entry", None)
self.db_set("value_after_depreciation",
(flt(self.gross_purchase_amount) - flt(self.opening_accumulated_depreciation)))
def set_status(self, status=None):
'''Get and update status'''
if not status:
status = self.get_status()
self.db_set("status", status)
def get_status(self):
'''Returns status based on whether it is draft, submitted, scrapped or depreciated'''
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Submitted"
if self.journal_entry_for_scrap:
status = "Scrapped"
elif self.finance_books:
idx = self.get_default_finance_book_idx() or 0
expected_value_after_useful_life = self.finance_books[idx].expected_value_after_useful_life
value_after_depreciation = self.finance_books[idx].value_after_depreciation
if flt(value_after_depreciation) <= expected_value_after_useful_life:
status = "Fully Depreciated"
elif flt(value_after_depreciation) < flt(self.gross_purchase_amount):
status = 'Partially Depreciated'
elif self.docstatus == 2:
status = "Cancelled"
return status
def get_default_finance_book_idx(self):
if not self.get('default_finance_book') and self.company:
self.default_finance_book = erpnext.get_default_finance_book(self.company)
if self.get('default_finance_book'):
for d in self.get('finance_books'):
if d.finance_book == self.default_finance_book:
return cint(d.idx) - 1
def update_stock_movement(self):
asset_movement = frappe.db.get_value('Asset Movement',
{'asset': self.name, 'reference_name': self.purchase_receipt, 'docstatus': 0}, 'name')
if asset_movement:
doc = frappe.get_doc('Asset Movement', asset_movement)
doc.submit()
def make_gl_entries(self):
gl_entries = []
if ((self.purchase_receipt or (self.purchase_invoice and
frappe.db.get_value('Purchase Invoice', self.purchase_invoice, 'update_stock')))
and self.purchase_receipt_amount and self.available_for_use_date <= nowdate()):
fixed_asset_account = get_asset_category_account(self.name, 'fixed_asset_account',
asset_category = self.asset_category, company = self.company)
cwip_account = get_asset_account("capital_work_in_progress_account",
self.name, self.asset_category, self.company)
gl_entries.append(self.get_gl_dict({
"account": cwip_account,
"against": fixed_aseet_account,
"remarks": self.get("remarks") or _("Accounting Entry for Asset"),
"posting_date": self.available_for_use_date,
"credit": self.purchase_receipt_amount,
"credit_in_account_currency": self.purchase_receipt_amount,
"cost_center": self.cost_center
}))
gl_entries.append(self.get_gl_dict({
"account": fixed_aseet_account,
"against": cwip_account,
"remarks": self.get("remarks") or _("Accounting Entry for Asset"),
"posting_date": self.available_for_use_date,
"debit": self.purchase_receipt_amount,
"debit_in_account_currency": self.purchase_receipt_amount,
"cost_center": self.cost_center
}))
if gl_entries:
from erpnext.accounts.general_ledger import make_gl_entries
make_gl_entries(gl_entries)
self.db_set('booked_fixed_asset', 1)
def get_depreciation_rate(self, args, on_validate=False):
if isinstance(args, string_types):
args = json.loads(args)
number_of_depreciations_booked = 0
if self.is_existing_asset:
number_of_depreciations_booked = self.number_of_depreciations_booked
float_precision = cint(frappe.db.get_default("float_precision")) or 2
tot_no_of_depreciation = flt(args.get("total_number_of_depreciations")) - flt(number_of_depreciations_booked)
if args.get("depreciation_method") in ["Straight Line", "Manual"]:
return 1.0 / tot_no_of_depreciation
if args.get("depreciation_method") == 'Double Declining Balance':
return 200.0 / args.get("total_number_of_depreciations")
if args.get("depreciation_method") == "Written Down Value":
if args.get("rate_of_depreciation") and on_validate:
return args.get("rate_of_depreciation")
no_of_years = flt(args.get("total_number_of_depreciations") * flt(args.get("frequency_of_depreciation"))) / 12
value = flt(args.get("expected_value_after_useful_life")) / flt(self.gross_purchase_amount)
# no_of_years-th root of (expected salvage value / gross purchase amount)
depreciation_rate = math.pow(value, 1.0/flt(no_of_years, 2))
return 100 * (1 - flt(depreciation_rate, float_precision))
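# Worked example of the rates returned by get_depreciation_rate (added for
# clarity; the inputs are assumptions): total_number_of_depreciations = 5,
# frequency_of_depreciation = 12, gross_purchase_amount = 100000,
# expected_value_after_useful_life = 10000:
#   - Straight Line / Manual   -> 1.0 / 5 = 0.2 (fraction per period)
#   - Double Declining Balance -> 200.0 / 5 = 40.0 (percent)
#   - Written Down Value       -> no_of_years = 5 * 12 / 12 = 5 and
#     rate = 100 * (1 - (10000 / 100000) ** (1 / 5)) ~= 36.9, reported as 37.0
#     once the intermediate root is rounded to the configured float precision.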
def update_maintenance_status():
assets = frappe.get_all('Asset', filters = {'docstatus': 1, 'maintenance_required': 1})
for asset in assets:
asset = frappe.get_doc("Asset", asset.name)
if frappe.db.exists('Asset Maintenance Task', {'parent': asset.name, 'next_due_date': today()}):
asset.set_status('In Maintenance')
if frappe.db.exists('Asset Repair', {'asset_name': asset.name, 'repair_status': 'Pending'}):
asset.set_status('Out of Order')
def make_post_gl_entry():
if is_cwip_accounting_disabled():
return
assets = frappe.db.sql_list(""" select name from `tabAsset`
where ifnull(booked_fixed_asset, 0) = 0 and available_for_use_date = %s""", nowdate())
for asset in assets:
doc = frappe.get_doc('Asset', asset)
doc.make_gl_entries()
def get_asset_naming_series():
meta = frappe.get_meta('Asset')
return meta.get_field("naming_series").options
@frappe.whitelist()
def make_purchase_invoice(asset, item_code, gross_purchase_amount, company, posting_date):
pi = frappe.new_doc("Purchase Invoice")
pi.company = company
pi.currency = frappe.get_cached_value('Company', company, "default_currency")
pi.set_posting_time = 1
pi.posting_date = posting_date
pi.append("items", {
"item_code": item_code,
"is_fixed_asset": 1,
"asset": asset,
"expense_account": get_asset_category_account(asset, 'fixed_asset_account'),
"qty": 1,
"price_list_rate": gross_purchase_amount,
"rate": gross_purchase_amount
})
pi.set_missing_values()
return pi
@frappe.whitelist()
def make_sales_invoice(asset, item_code, company, serial_no=None):
si = frappe.new_doc("Sales Invoice")
si.company = company
si.currency = frappe.get_cached_value('Company', company, "default_currency")
disposal_account, depreciation_cost_center = get_disposal_account_and_cost_center(company)
si.append("items", {
"item_code": item_code,
"is_fixed_asset": 1,
"asset": asset,
"income_account": disposal_account,
"serial_no": serial_no,
"cost_center": depreciation_cost_center,
"qty": 1
})
si.set_missing_values()
return si
@frappe.whitelist()
def create_asset_maintenance(asset, item_code, item_name, asset_category, company):
asset_maintenance = frappe.new_doc("Asset Maintenance")
asset_maintenance.update({
"asset_name": asset,
"company": company,
"item_code": item_code,
"item_name": item_name,
"asset_category": asset_category
})
return asset_maintenance
@frappe.whitelist()
def create_asset_adjustment(asset, asset_category, company):
asset_maintenance = frappe.new_doc("Asset Value Adjustment")
asset_maintenance.update({
"asset": asset,
"company": company,
"asset_category": asset_category
})
return asset_maintenance
@frappe.whitelist()
def transfer_asset(args):
args = json.loads(args)
if args.get('serial_no'):
args['quantity'] = len(args.get('serial_no').split('\n'))
movement_entry = frappe.new_doc("Asset Movement")
movement_entry.update(args)
movement_entry.insert()
movement_entry.submit()
frappe.db.commit()
frappe.msgprint(_("Asset Movement record {0} created").format("<a href='#Form/Asset Movement/{0}'>{0}</a>".format(movement_entry.name)))
@frappe.whitelist()
def get_item_details(item_code, asset_category):
asset_category_doc = frappe.get_doc('Asset Category', asset_category)
books = []
for d in asset_category_doc.finance_books:
books.append({
'finance_book': d.finance_book,
'depreciation_method': d.depreciation_method,
'total_number_of_depreciations': d.total_number_of_depreciations,
'frequency_of_depreciation': d.frequency_of_depreciation,
'start_date': nowdate()
})
return books
def get_asset_account(account_name, asset=None, asset_category=None, company=None):
account = None
if asset:
account = get_asset_category_account(asset, account_name,
asset_category = asset_category, company = company)
if not account:
account = frappe.get_cached_value('Company', company, account_name)
if not account:
frappe.throw(_("Set {0} in asset category {1} or company {2}")
.format(account_name.replace('_', ' ').title(), asset_category, company))
return account
@frappe.whitelist()
def make_journal_entry(asset_name):
asset = frappe.get_doc("Asset", asset_name)
fixed_asset_account, accumulated_depreciation_account, depreciation_expense_account = \
get_depreciation_accounts(asset)
depreciation_cost_center, depreciation_series = frappe.db.get_value("Company", asset.company,
["depreciation_cost_center", "series_for_depreciation_entry"])
depreciation_cost_center = asset.cost_center or depreciation_cost_center
je = frappe.new_doc("Journal Entry")
je.voucher_type = "Depreciation Entry"
je.naming_series = depreciation_series
je.company = asset.company
je.remark = "Depreciation Entry against asset {0}".format(asset_name)
je.append("accounts", {
"account": depreciation_expense_account,
"reference_type": "Asset",
"reference_name": asset.name,
"cost_center": depreciation_cost_center
})
je.append("accounts", {
"account": accumulated_depreciation_account,
"reference_type": "Asset",
"reference_name": asset.name
})
return je
def is_cwip_accounting_disabled():
return cint(frappe.db.get_single_value("Asset Settings", "disable_cwip_accounting"))
| gpl-3.0 | -5,258,987,987,461,458,000 | 38.320598 | 166 | 0.715137 | false |
AISpace2/AISpace2 | aispace2/jupyter/csp/csp.py | 1 | 23307 | import threading
from functools import partial
from time import sleep
from ipywidgets import register
from traitlets import Bool, Dict, Float, Instance, Unicode
from aipython.cspProblem import CSP
from ... import __version__
from ..stepdomwidget import ReturnableThread, StepDOMWidget
from .cspjsonbridge import (csp_to_json, generate_csp_graph_mappings,
json_to_csp)
@register
class Displayable(StepDOMWidget):
"""A Jupyter widget for visualizing constraint satisfaction problems (CSPs).
Handles arc consistency, domain splitting, and stochastic local search (SLS).
See the accompanying frontend file: `js/src/csp/CSPVisualizer.ts`
"""
_view_name = Unicode('CSPViewer').tag(sync=True)
_model_name = Unicode('CSPViewerModel').tag(sync=True)
_view_module = Unicode('aispace2').tag(sync=True)
_model_module = Unicode('aispace2').tag(sync=True)
_view_module_version = Unicode(__version__).tag(sync=True)
_model_module_version = Unicode(__version__).tag(sync=True)
# The CSP that is synced as a graph to the frontend.
graph = Instance(klass=CSP, allow_none=True).tag(
sync=True, to_json=csp_to_json, from_json=json_to_csp)
# Constrols whether the auto arc consistency button will show up in the widget (will not in SLS)
need_AC_button = Bool(True).tag(sync=True)
# Tracks if the visualization has been rendered at least once in the front-end. See the @visualize decorator.
_previously_rendered = Bool(False).tag(sync=True)
wait_for_render = Bool(True).tag(sync=True)
def __init__(self):
super().__init__()
self.visualizer = self
##############################
### SLS-specific variables ###
##############################
# Tracks if this is the first conflict reported.
# If so, will also compute non-conflicts to highlight green the first time around.
self._sls_first_conflict = True
##########################################
### Arc consistency-specific variables ###
##########################################
# A reference to the arc the user has selected for arc consistency. A tuple of (variable name, Constraint instance).
self._selected_arc = None
# True if the user has selected an arc to perform arc-consistency on. Otherwise, an arc is automatically chosen.
self._has_user_selected_arc = False
# True if the algorithm is at a point where an arc is waiting to be chosen. Used to filter out extraneous clicks otherwise.
self._is_waiting_for_arc_selection = False
###########################################
### Domain splitting-specific variables ###
##########################################
# A reference to the variable the user has selected for domain splitting.
self._selected_var = None
# True if the user has selected a var to perform domain splitting on. Otherwise, a variable is automatically chosen.
self._has_user_selected_var = False
# True if the algorithm is at a point where a var is waiting to be chosen. Used to filter out extraneous clicks otherwise.
self._is_waiting_for_var_selection = False
# The domain the user has chosen as their first split for `_selected_var`.
self._domain_split = None
# self.graph = self.csp
self.graph = CSP(self.csp.domains,
self.csp.constraints, self.csp.positions)
(self._domain_map, self._edge_map) = generate_csp_graph_mappings(self.csp)
self._initialize_controls()
def wait_for_arc_selection(self, to_do):
"""Pauses execution until an arc has been selected and returned.
If the algorithm is running in auto mode, an arc is returned immediately.
Otherwise, this function blocks until an arc is selected by the user.
Args:
to_do (set): A set of arcs to choose from. This set will be modified.
Returns:
(string, Constraint):
A tuple (var_name, constraint) that represents an arc from `to_do`.
"""
# Running in Auto mode. Don't block!
if self.max_display_level == 1 or self.max_display_level == 0:
return to_do.pop()
self._is_waiting_for_arc_selection = True
self._block_for_user_input.wait()
if self._has_user_selected_arc:
self._has_user_selected_arc = False
to_do.discard(self._selected_arc)
return self._selected_arc
# User did not select. Return random arc.
return to_do.pop()
def wait_for_var_selection(self, iter_var):
"""Pauses execution until a variable has been selected and returned.
If the user steps instead of clicking on a variable, a random variable is returned.
Otherwise, the variable clicked by the user is returned, but only if it is a variable
that can be split on. Otherwise, this function continues waiting.
Args:
iter_var (iter): Variables that the user is allowed to split on.
Returns:
(string): The variable to split on.
"""
# Running in Auto mode. Split in half!
if self.max_display_level == 1:
return list(iter_var)[0]
# Running in Auto Arc Consistency mode. Change to normal!
if self.max_display_level == 0:
self.max_display_level = 2
iter_var = list(iter_var)
self._send_highlight_splittable_nodes_action(iter_var)
self._is_waiting_for_var_selection = True
self._block_for_user_input.wait()
while (self.max_display_level != 1 and not self._has_user_selected_var):
self._block_for_user_input.wait()
if self._has_user_selected_var:
self._has_user_selected_var = False
if self._selected_var in iter_var:
return self._selected_var
else:
return self.wait_for_var_selection(iter_var)
self._is_waiting_for_var_selection = False
return iter_var[0]
def choose_domain_partition(self, domain, var):
"""Pauses execution until a domain has been split on.
If the user chooses to not select a domain (clicks 'Cancel'), splits the domain in half.
Otherwise, the subset of the domain chosen by the user is used as the initial split.
Args:
domain (set): Domain of the variable being split on.
Returns:
(set): A subset of the domain to be split on first.
"""
# Running in Auto mode. Split in half!
if self.max_display_level == 1:
split = len(domain) // 2
dom1 = set(list(domain)[:split])
dom2 = domain - dom1
return dom1, dom2
if self._domain_split is None:
# Split in half
split = len(domain) // 2
dom1 = set(list(domain)[:split])
dom2 = domain - dom1
return dom1, dom2
# make sure type of chosen domain matches original domain
if all(isinstance(n, int) for n in domain):
number_domain = set()
for n in self._domain_split:
number_domain.add(int(n))
self._domain_split = number_domain
split1 = set(self._domain_split)
split2 = set(domain) - split1
return split1, split2
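# Example of the splits produced above (added; illustrative only):
#   domain = {1, 2, 3, 4}
#   - auto mode, or the user cancels -> ({1, 2}, {3, 4})   (split in half)
#   - the user picks {1, 3}          -> ({1, 3}, {2, 4})   (chosen subset first)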
def handle_custom_msgs(self, _, content, buffers=None):
super().handle_custom_msgs(None, content, buffers)
event = content.get('event', '')
if event == 'arc:click':
"""
Expects a dictionary containing:
varName (string): The name of the variable connected to this arc.
constId (string): The id of the constraint connected to this arc.
"""
if self._is_waiting_for_arc_selection:
var_name = content.get('varName')
const = self.csp.constraints[content.get('constId')]
self.max_display_level = 2
self._selected_arc = (var_name, const)
self._has_user_selected_arc = True
self._block_for_user_input.set()
self._block_for_user_input.clear()
self._is_waiting_for_arc_selection = False
elif event == 'var:click':
"""
Expects a dictionary containing:
varName (string): The name of the variable to split on.
"""
if not self._is_waiting_for_var_selection and content.get('varType') == 'csp:variable':
self.send({'action': 'chooseDomainSplitBeforeAC'})
elif event == 'domain_split':
"""
Expects a dictionary containing:
domain (string[]|None):
An array of the elements in the domain to first split on, or None if no choice is made.
In this case, splits the domain in half as a default.
"""
domain = content.get('domain')
var_name = content.get('var')
self._selected_var = var_name
self._domain_split = domain
self._has_user_selected_var = True
self._block_for_user_input.set()
self._block_for_user_input.clear()
self._is_waiting_for_var_selection = False
elif event == 'reset':
"""
Reset the algorithm and graph
"""
# Before resetting backend, freeze the execution of queued function to avoid undetermined state
self._pause()
# Wait until freezeing completed
sleep(0.2)
# Reset algorithm related variables
user_sleep_time = getattr(self, 'sleep_time', None)
super().__init__()
self.sleep_time = user_sleep_time
self.visualizer = self
self._sls_first_conflict = True
self._selected_arc = None
self._has_user_selected_arc = False
self._is_waiting_for_arc_selection = False
self._selected_var = None
self._has_user_selected_var = False
self._is_waiting_for_var_selection = False
self._domain_split = None
self.graph = CSP(self.csp.domains,
self.csp.constraints, self.csp.positions)
(self._domain_map, self._edge_map) = generate_csp_graph_mappings(self.csp)
# Tell frontend that it is ready to reset frontend graph and able to restart algorithm
self.send({'action': 'frontReset'})
# Terminate current running thread
if self._thread:
self.stop_thread(self._thread)
elif event == 'initial_render':
queued_func = getattr(self, '_queued_func', None)
# Run queued function after we know the frontend view exists
if queued_func:
func = queued_func['func']
args = queued_func['args']
kwargs = queued_func['kwargs']
self._previously_rendered = True
self._thread = ReturnableThread(
target=func, args=args, kwargs=kwargs)
self._thread.start()
elif event == 'update_sleep_time':
self.sleep_time = content.get('sleepTime')
def display(self, level, *args, **kwargs):
if self.wait_for_render is False:
return
if self._request_backtrack is True:
return
should_wait = True
if args[0] == 'Performing AC with domains':
should_wait = False
domains = args[1]
vars_to_change = []
domains_to_change = []
for var, domain in domains.items():
vars_to_change.append(var)
domains_to_change.append(domain)
self._send_set_domains_action(vars_to_change, domains_to_change)
elif args[0] == 'Domain pruned':
variable = args[2]
domain = args[4]
constraint = args[6]
self._send_set_domains_action(variable, domain)
self._send_highlight_arcs_action(
(variable, constraint), style='bold', colour='green')
elif args[0] == "Processing arc (":
variable = args[1]
constraint = args[3]
self._send_highlight_arcs_action(
(variable, constraint), style='bold', colour=None)
elif args[0] == "Arc: (" and args[4] == ") is inconsistent":
variable = args[1]
constraint = args[3]
self._send_highlight_arcs_action(
(variable, constraint), style='bold', colour='red')
elif args[0] == "Arc: (" and args[4] == ") now consistent":
variable = args[1]
constraint = args[3]
self._send_highlight_arcs_action(
(variable, constraint), style='normal', colour='green')
should_wait = False
elif (args[0] == "Adding" or args[0] == "New domain. Adding") and args[2] == "to to_do.":
if args[1] != "nothing":
arcs = list(args[1])
arcs_to_highlight = []
for arc in arcs:
arcs_to_highlight.append((arc[0], arc[1]))
self._send_highlight_arcs_action(
arcs_to_highlight, style='normal', colour='blue')
elif args[0] == "You can now split domain. Click on a variable whose domain has more than 1 value.":
self.send({'action': 'chooseDomainSplit'})
elif args[0] == "... splitting":
self.send(
{'action': 'setOrder', 'var': args[1], 'domain': args[3], 'other': args[5]})
elif args[0] == "Solution found:":
if self.max_display_level == 0:
self.max_display_level = 2
solString = ""
for var in args[1]:
solString += var + "=" + str(args[1][var]) + ", "
solString = solString[:-2]
self.send({'action': 'setPreSolution', 'solution': solString})
args += ("\nClick Fine Step, Step, Auto Arc Consistency, Auto Solve to find solutions in other domains.", )
elif args[0] == "Solving new domain with":
self.send(
{'action': 'setSplit', 'domain': args[2], 'var': args[1]})
elif args[0] == "Click Fine Step, Step, Auto Arc Consistency, Auto Solve to find solutions in other domains.":
if self.max_display_level == 0:
self.max_display_level = 2
self.send({'action': 'noSolution'})
#############################
### SLS-specific displays ###
#############################
elif args[0] == "Initial assignment":
assignment = args[1]
for (key, val) in assignment.items():
self._send_set_domains_action(key, [val])
elif args[0] == "Assigning" and args[2] == "=":
var = args[1]
domain = args[3]
self._send_set_domains_action(var, [domain])
self._send_highlight_nodes_action(var, "blue")
elif args[0] == "Checking":
node = args[1]
self._send_highlight_nodes_action(node, "blue")
elif args[0] == "Still inconsistent":
const = args[1]
nodes_to_highlight = {const}
arcs_to_highlight = []
for var in const.scope:
nodes_to_highlight.add(var)
arcs_to_highlight.append((var, const))
self._send_highlight_nodes_action(nodes_to_highlight, "red")
self._send_highlight_arcs_action(arcs_to_highlight, "bold", "red")
elif args[0] == "Still consistent":
const = args[1]
nodes_to_highlight = {const}
arcs_to_highlight = []
for var in const.scope:
nodes_to_highlight.add(var)
arcs_to_highlight.append((var, const))
self._send_highlight_nodes_action(nodes_to_highlight, "green")
self._send_highlight_arcs_action(
arcs_to_highlight, "bold", "green")
elif args[0] == "Became consistent":
const = args[1]
nodes_to_highlight = {const}
arcs_to_highlight = []
for var in const.scope:
nodes_to_highlight.add(var)
arcs_to_highlight.append((var, const))
self._send_highlight_nodes_action(nodes_to_highlight, "green")
self._send_highlight_arcs_action(
arcs_to_highlight, "bold", "green")
elif args[0] == "Became inconsistent":
const = args[1]
nodes_to_highlight = {const}
arcs_to_highlight = []
for var in const.scope:
nodes_to_highlight.add(var)
arcs_to_highlight.append((var, const))
self._send_highlight_nodes_action(nodes_to_highlight, "red")
self._send_highlight_arcs_action(arcs_to_highlight, "bold", "red")
elif args[0] == "AC done. Reduced domains":
should_wait = False
elif args[0] == "Conflicts:":
conflicts = args[1]
conflict_nodes_to_highlight = set()
conflict_arcs_to_highlight = []
non_conflict_nodes_to_highlight = set()
non_conflict_arcs_to_highlight = []
if self._sls_first_conflict:
# Highlight all non-conflicts green
self._sls_first_conflict = False
not_conflicts = set(self.csp.constraints) - conflicts
for not_conflict in not_conflicts:
non_conflict_nodes_to_highlight.add(not_conflict)
for node in not_conflict.scope:
non_conflict_nodes_to_highlight.add(node)
non_conflict_arcs_to_highlight.append(
(node, not_conflict))
self._send_highlight_nodes_action(
non_conflict_nodes_to_highlight, "green")
self._send_highlight_arcs_action(
non_conflict_arcs_to_highlight, "bold", "green")
# Highlight all conflicts red
for conflict in conflicts:
conflict_nodes_to_highlight.add(conflict)
for node in conflict.scope:
conflict_nodes_to_highlight.add(node)
conflict_arcs_to_highlight.append((node, conflict))
self._send_highlight_nodes_action(
conflict_nodes_to_highlight, "red")
self._send_highlight_arcs_action(
conflict_arcs_to_highlight, "bold", "red")
super().display(level, *args, **dict(kwargs, should_wait=should_wait))
def _send_highlight_nodes_action(self, vars, colour):
"""Sends a message to the front-end visualization to highlight nodes.
Args:
vars (string|string[]): The name(s) of the variables to highlight.
colour (string|None): A HTML colour string for the stroke of the node.
Passing in None will keep the existing stroke of the node.
"""
# We don't want to check if it is iterable because a string is iterable
if not isinstance(vars, list) and not isinstance(vars, set):
vars = [vars]
nodeIds = []
for var in vars:
nodeIds.append(self._domain_map[var])
self.send({
'action': 'highlightNodes',
'nodeIds': nodeIds,
'colour': colour
})
def _send_highlight_splittable_nodes_action(self, vars):
"""Sends a message to the front-end visualization to highlight Splittable nodes when users can split domain.
Args:
vars (string|string[]): The name(s) of the splittable variables to highlight.
"""
# We don't want to check if it is iterable because a string is iterable
if not isinstance(vars, list) and not isinstance(vars, set):
vars = [vars]
nodeIds = []
for var in vars:
nodeIds.append(self._domain_map[var])
self.send({
'action': 'highlightSplittableNodes',
'nodeIds': nodeIds,
})
def _send_highlight_arcs_action(self, arcs, style='normal', colour=None):
"""Sends a message to the front-end visualization to highlight arcs.
Args:
arcs ((string, Constraint)|(string, Constraint)[]):
Tuples of (variable name, Constraint instance) that form an arc.
For convenience, you do not need to pass a list of tuples if you only have one to highlight.
style ('normal'|'bold'): Style of the highlight. Applied to every arc passed in.
colour (string|None): A HTML colour string for the colour of the line.
Passing in None will keep the existing colour of the arcs.
"""
if not isinstance(arcs, list):
arcs = [arcs]
arc_ids = []
for arc in arcs:
arc_ids.append(self._edge_map[arc])
self.send({
'action': 'highlightArcs',
'arcIds': arc_ids,
'style': style,
'colour': colour
})
def _send_set_domains_action(self, vars, domains):
"""Sends a message to the front-end visualization to set the domains of variables.
Args:
vars (string|string[]): The name of the variable(s) whose domain should be changed.
domains (List[int|string]|List[List[int|string]]): The updated domain of the variable(s).
If vars is an array, then domain is an array of domains, in the same order.
"""
is_single_var = False
if not isinstance(vars, list):
vars = [vars]
is_single_var = True
self.send({
'action':
'setDomains',
'nodeIds': [self._domain_map[var] for var in vars],
'domains': [list(domain) for domain in domains]
if not is_single_var else [domains]
})
def visualize(func_to_delay):
"""Enqueues a function that does not run until the Jupyter widget has rendered.
Once the Jupyter widget has rendered once, further invocation of the wrapped function
behave as if unwrapped. Necessary because otherwise, the function runs (and blocks when display is called)
immediately, before the view has a chance to render
(and so there is no way to unblock using the step buttons!)
Args:
func_to_delay (function): The function to delay.
Returns:
The original function, wrapped such that it will automatically run
when the Jupyter widget is rendered.
"""
def wrapper(self, *args, **kwargs):
if self._previously_rendered is False and self.wait_for_render:
self._queued_func = {
'func': partial(func_to_delay, self),
'args': args,
'kwargs': kwargs
}
else:
return func_to_delay(self, *args, **kwargs)
return wrapper
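# Minimal usage sketch for the decorator above (added; the class and method names
# are assumptions, not part of this module). A solver method wrapped with
# @visualize is queued until the front-end widget reports its first render:
#
#   class MySearcher(Displayable):
#       @visualize
#       def solve(self, arc_heuristic=None):
#           ...  # calls self.display(...) as it runs; unblocked by the step buttons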
| gpl-3.0 | -6,535,950,557,674,089,000 | 38.105705 | 131 | 0.562449 | false |
lukefrasera/cs775Homework | hw_002/scripts/gaussian_classify.py | 1 | 9070 | #!/usr/bin/env python
import numpy as np
from numpy import linalg as la
import matplotlib.pyplot as plt
import argparse
import os
import pdb
from scipy import spatial
import time
import operator
'''
Python program demonstrating the use of a Gaussian classifier.
'''
#KNNCLassifier returns a tuple of the K closest feature vectors
def KNNSearch(k, features, test_data):
test_data_classification = []
for test_index, test_element in enumerate(test_data):
if test_element == []:
continue
neighborDistance = []
for feature_index,feature in enumerate(features):
try:
distance = la.norm(feature-test_element)
except ValueError:
pdb.set_trace()
neighborDistance.append([distance, feature_index])
neighborDistance = sorted(neighborDistance, key=lambda row: row[0])  # ascending, so the closest neighbours come first
#pdb.set_trace()
test_data_classification.append(np.matrix([row[1] for row in neighborDistance[0:k]]))  # indices of the k closest features
return test_data_classification
def KNNSearchFast(k, features, test_data):
t0 = time.time()
tree = spatial.KDTree(features)
t1 = time.time()
result = tree.query(test_data, k)
t2 = time.time()
print "Build time: %f, query time: %f" % (t1-t0, t2-t1)
return result
def KNNClassify(train_classification, test_neighbors):
test_classification = []
for sample in test_neighbors[1]:
votes = [0 for x in xrange(10)]
try:
for neighbor in sample:
sample_class = int(train_classification[neighbor])
votes[sample_class] += 1
except TypeError:
#catch the case where K=1
sample_class = int(train_classification[sample])
votes[sample_class] = 1
classification = max(enumerate(votes), key=operator.itemgetter(1))[0]
test_classification.append(classification)
return test_classification
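# Illustrative example for the voting above (added): with k = 3 and
# train_classification = [7, 1, 7, ...], a test sample whose nearest neighbours are
# the training indices [0, 2, 1] produces votes {7: 2, 1: 1} and is classified as 7.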
def LSESearch(features,classification, test_data):
features = np.matrix(features)
classification = np.matrix(classification).T
test_data = np.matrix(test_data)
filter = la.inv(features.T * features) * features.T * classification
test_data_classification = []
classification = (test_data * filter)
classification[classification < 0] = -1
classification[classification >=0] = 1
return classification
def ParseData(raw_data, class1, class2):
raw_data = raw_data.rstrip('\n')
raw_data_list = raw_data.split('\n')
data_list = list()
for raw_data_point in raw_data_list:
raw_data_point = raw_data_point.rstrip()
point = raw_data_point.split(' ')
data_list.append([float(x) for x in point])
data_list.pop()
data_list_np = np.array(data_list)
mask = (data_list_np[:,0] == class1) + (data_list_np[:,0] == class2)
data_list_np = data_list_np[mask]
return data_list_np
def GaussianBuild(features, classification, classa, classb):
classaFeaturesMask = (classification == classa)
classbFeaturesMask = (classification == classb)
aFeatures = np.array(features)[classaFeaturesMask].T
bFeatures = np.array(features)[classbFeaturesMask].T
print 'Of ',features.shape,'Elements, ',aFeatures.shape,' are of class A, ',bFeatures.shape,' are of class B'
aCovMat = np.cov(aFeatures)
aMeanMat = np.mean(aFeatures,1)
bCovMat = np.cov(bFeatures)
bMeanMat = np.mean(bFeatures,1)
return [aCovMat,aMeanMat,bCovMat,bMeanMat]
def ComputeGaussianProbability(covMat, meanMat, sample):
meanMat = np.matrix(meanMat).T
sample = sample.T
#sample = meanMat
nonInvertible = True
eyeScale = 0.0
while nonInvertible:
nonInvertible = False
try:
covMatInverse = la.inv(covMat + np.eye(covMat.shape[0])*eyeScale)
except la.linalg.LinAlgError:
nonInvertible = True
eyeScale = eyeScale + 0.0001
if eyeScale > 0.002:
print 'Set lambda to ',eyeScale,' to make covMat invertible'
probability = 1.0/(np.sqrt(la.det(2*np.pi*covMat)))  # determinant, not matrix norm, gives the Gaussian normalisation
probability *= np.exp(-0.5*(sample-meanMat).T*covMatInverse*(sample-meanMat))
return probability
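# For reference (added comment): the density approximated above is the multivariate
# normal
#   p(x) = exp(-0.5 * (x - mu)^T * Sigma^-1 * (x - mu)) / sqrt(det(2 * pi * Sigma))
# with Sigma = covMat and mu = meanMat; the lambda * I term exists only to make a
# (near-)singular covariance matrix invertible.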
def GaussianClassify(aCovMat, aMeanMat, bCovMat, bMeanMat, test_data):
#for each sample, compute the probability of it belonging to each class
for sample in test_data:
#pdb.set_trace()
probability_a = ComputeGaussianProbability(aCovMat, aMeanMat, sample)
probability_b = ComputeGaussianProbability(bCovMat, bMeanMat, sample)
print 'Sample P(A)=',probability_a,'Sample P(B)=',probability_b
def main():
parser = argparse.ArgumentParser(description='Process input')
parser.add_argument('-t', '--training_file', type=str, help='submit data to train against')
parser.add_argument('-f', '--testing_file', type=str, help='submit data to test the trained model against')
parser.add_argument('-s', '--save_model', type=str, help='save out trained model')
parser.add_argument('-r', '--read_model', type=str, help='read in trained model')
parser.add_argument('-k', '--k_neighbors', type=int, help='number of neighbors to find')
parser.add_argument('-a', '--classa', type=int, help='class to test/train on')
parser.add_argument('-b', '--classb', type=int, help='class to test/train on')
parser.add_argument('-m', '--method', type=int, help='0=KNN,1=LSE,2=Gauss')
args = parser.parse_args()
# Check if Arguments allow execution
if (not args.training_file) and (not args.read_model):
print "Error: No training Data or model present!"
return -1
if args.training_file and args.read_model:
print "Error: cannot read model and traing data at the same time!"
return -1
if args.training_file:
# trainagainst training file
if not os.path.isfile(args.training_file):
print "Error: Training file doesn't exist!"
return -1
# train
with open(args.training_file) as file:
# read file contents
raw_data = file.read()
# parse data
data = ParseData(raw_data, args.classa, args.classb)
# train on data
classification = data[:,0]
features = np.matrix(data[:,1:])
if args.testing_file:
with open(args.testing_file) as test_file:
raw_test_data = test_file.read()
test_data = ParseData(raw_test_data, args.classa, args.classb)
test_data_truth = test_data[:,0]
test_data = np.matrix(test_data[:,1:])
if args.method == 0:
#Do KNN classification
nearest_neighbors = KNNSearchFast(args.k_neighbors, features, test_data)
print "Num training samples: %d, num test samples: %d" % (len(classification), len(test_data_truth))
classification = KNNClassify(classification, nearest_neighbors)
#Compute the error rate
errors = test_data_truth - classification
misclassification_a = errors[errors == args.classa - args.classb]
misclassification_b = errors[errors == args.classb - args.classa]
mask = errors != 0
num_errors = sum(mask)
print "Error rate: %f%%" % (float(num_errors)/len(test_data_truth)*100)
print "Percentage of %d's misclassified: %f" % (args.classa,
float(misclassification_a.size)/test_data_truth[test_data_truth == args.classa].size*100)
print "Percentage of %d's misclassified: %f" % (args.classb, float(misclassification_b.size)/test_data_truth[test_data_truth == args.classb].size*100)
if args.method == 1:
#Do LSE classification
#make classification binary
classification[classification == args.classa] = -1
classification[classification == args.classb] = 1
#Perform the classficiation on the test data
test_data_classification = LSESearch(features, classification, test_data)
test_data_truth[test_data_truth == args.classa] = -1
test_data_truth[test_data_truth == args.classb] = 1
#Compute the error rate
errors = test_data_classification.T - np.matrix(test_data_truth)
misclassification_a = errors[errors == 2]
misclassification_b = errors[errors == -2]
num_errors = np.sum(np.absolute(errors))
print "Num training samples: %d, num test samples: %d" % (len(classification), len(test_data_truth))
print "Error rate: %f%%" % (float(num_errors)/len(test_data_truth)*100)
print "Percentage of %d's misclassified: %f" % (args.classa, float(misclassification_a.size)/test_data_truth[test_data_truth == -1].size*100)
print "Percentage of %d's misclassified: %f" % (args.classb, float(misclassification_b.size)/test_data_truth[test_data_truth == 1].size*100)
if args.method == 2:
#build the gaussian model
[aCovMat, aMeanMat, bCovMat, bMeanMat] = GaussianBuild(features, classification, args.classa, args.classb)
GaussianClassify(aCovMat, aMeanMat, bCovMat, bMeanMat, features)
if __name__ == '__main__':
main()
| lgpl-3.0 | -3,786,826,768,799,533,600 | 42.605769 | 159 | 0.653032 | false |
srowe/xen-api | scripts/examples/smapiv2.py | 8 | 9466 | #!/usr/bin/env python
import os, sys, time, socket, traceback
log_f = os.fdopen(os.dup(sys.stdout.fileno()), "aw")
pid = None
def reopenlog(log_file):
global log_f
if log_f:
log_f.close()
if log_file:
log_f = open(log_file, "aw")
else:
log_f = os.fdopen(os.dup(sys.stdout.fileno()), "aw")
def log(txt):
global log_f, pid
if not pid:
pid = os.getpid()
t = time.strftime("%Y%m%dT%H:%M:%SZ", time.gmtime())
print >>log_f, "%s [%d] %s" % (t, pid, txt)
log_f.flush()
# Functions to construct SMAPI return types #################################
unit = [ "Success", "Unit" ]
# Throw this to return an SR_BACKEND_FAILURE to the caller ##################
class BackendError(Exception):
def __init__(self, code, params):
self.code = code
self.params = params
def __str__(self):
return "BackendError(%s, %s)" % (self.code, ", ".join(self.params))
class Vdi_does_not_exist(Exception):
def __init__(self, vdi):
self.vdi = vdi
def __str__(self):
return "Vdi_does_not_exist(%s)" % self.vdi
def vdi(vdi_info):
# return ['Success', ['Vdi', {'vdi': location, 'virtual_size': str(virtual_size) }]]
return ['Success', ['Vdi', vdi_info]]
def vdis(vis):
return ['Success', ['Vdis', vis]]
def params(params):
return ['Success', ['Params', params ]]
def value(result):
return { "Status": "Success", "Value": result }
def backend_error(code, params):
return [ "Failure", [ "Backend_error", code, params ] ]
def internal_error(txt):
return [ "Failure", "Internal_error", txt ]
def vdi_does_not_exist():
return [ "Failure", "Vdi_does_not_exist" ]
# Type-checking helper functions ############################################
vdi_info_types = {
"vdi": type(""),
"name_label": type(""),
"name_description": type(""),
"ty": type(""),
"metadata_of_pool": type(""),
"is_a_snapshot": type(True),
"snapshot_time": type(""),
"snapshot_of": type(""),
"read_only": type(True),
"cbt_enabled": type(True),
"virtual_size": type(""),
"physical_utilisation": type("")
}
def make_vdi_info(v):
global vdi_info_types
for k in vdi_info_types.keys():
t = vdi_info_types[k]
if t == type(""):
v[k] = str(v[k])
elif t == type(True):
v[k] = str(v[k]).lower() == "true"
else:
raise (BackendError("make_vdi_info unknown type", [ str(t) ]))
return v
def vdi_info(v):
global vdi_info_types
for k in vdi_info_types.keys():
if k not in v:
raise (BackendError("vdi_info missing key", [ k, repr(v) ]))
t = vdi_info_types[k]
if type(v[k]) <> t:
raise (BackendError("vdi_info key has wrong type", [ k, str(t), str(type(v[k])) ]))
return v
def expect_none(x):
if x <> None:
raise (BackendError("type error", [ "None", repr(x) ]))
def expect_long(x):
if type(x) <> type(0L):
raise (BackendError("type error", [ "long int", repr(x) ]))
def expect_string(x):
if type(x) <> type(""):
raise (BackendError("type error", [ "string", repr(x) ]))
# Well-known feature flags understood by xapi ##############################
feature_sr_probe = "SR_PROBE"
feature_sr_update = "SR_UPDATE"
feature_sr_supports_local_caching = "SR_SUPPORTS_LOCAL_CACHING"
feature_vdi_create = "VDI_CREATE"
feature_vdi_destroy = "VDI_DESTROY"
feature_vdi_attach = "VDI_ATTACH"
feature_vdi_detach = "VDI_DETACH"
feature_vdi_resize = "VDI_RESIZE"
feature_vdi_resize_online = "VDI_RESIZE_ONLINE"
feature_vdi_clone = "VDI_CLONE"
feature_vdi_snapshot = "VDI_SNAPSHOT"
feature_vdi_activate = "VDI_ACTIVATE"
feature_vdi_deactivate = "VDI_DEACTIVATE"
feature_vdi_update = "VDI_UPDATE"
feature_vdi_introduce = "VDI_INTRODUCE"
feature_vdi_generate_config = "VDI_GENERATE_CONFIG"
feature_vdi_reset_on_boot = "VDI_RESET_ON_BOOT"
# Unmarshals arguments and marshals results (including exceptions) ##########
class Marshall:
def __init__(self, x):
self.x = x
def query(self, args):
result = self.x.query()
return value(result)
def sr_attach(self, args):
result = self.x.sr_attach(args["task"], args["sr"], args["device_config"])
expect_none(result)
return value(unit)
def sr_detach(self, args):
result = self.x.sr_detach(args["task"], args["sr"])
expect_none(result)
return value(unit)
def sr_destroy(self, args):
result = self.x.sr_destroy(args["task"], args["sr"])
expect_none(result)
return value(unit)
def sr_scan(self, args):
vis = self.x.sr_scan(args["task"], args["sr"])
result = map(lambda vi: vdi_info(vi), vis)
return value(vdis(result))
def vdi_create(self, args):
vi = self.x.vdi_create(args["task"], args["sr"], vdi_info(args["vdi_info"]), args["params"])
return value(vdi(vdi_info(vi)))
def vdi_destroy(self, args):
result = self.x.vdi_destroy(args["task"], args["sr"], args["vdi"])
expect_none(result)
return value(unit)
def vdi_attach(self, args):
result = self.x.vdi_attach(args["task"], args["dp"], args["sr"], args["vdi"], args["read_write"])
expect_string(result)
return value(params(result))
def vdi_activate(self, args):
result = self.x.vdi_activate(args["task"], args["dp"], args["sr"], args["vdi"])
expect_none(result)
return value(unit)
def vdi_deactivate(self, args):
result = self.x.vdi_deactivate(args["task"], args["dp"], args["sr"], args["vdi"])
expect_none(result)
return value(unit)
def vdi_detach(self, args):
result = self.x.vdi_detach(args["task"], args["dp"], args["sr"], args["vdi"])
expect_none(result)
return value(unit)
def _dispatch(self, method, params):
try:
log("method = %s params = %s" % (method, repr(params)))
args = params[0]
if method == "query":
return self.query(args)
elif method == "SR.attach":
return self.sr_attach(args)
elif method == "SR.detach":
return self.sr_detach(args)
elif method == "SR.scan":
return self.sr_scan(args)
elif method == "VDI.create":
return self.vdi_create(args)
elif method == "VDI.destroy":
return self.vdi_destroy(args)
elif method == "VDI.attach":
return self.vdi_attach(args)
elif method == "VDI.activate":
return self.vdi_activate(args)
elif method == "VDI.deactivate":
return self.vdi_deactivate(args)
elif method == "VDI.detach":
return self.vdi_detach(args)
except BackendError, e:
log("caught %s" % e)
traceback.print_exc()
return value(backend_error(e.code, e.params))
except Vdi_does_not_exist, e:
log("caught %s" %e)
return value(vdi_does_not_exist())
except Exception, e:
log("caught %s" % e)
traceback.print_exc()
return value(internal_error(str(e)))
# Helper function to daemonise ##############################################
def daemonize():
def fork():
try:
if os.fork() > 0:
# parent
sys.exit(0)
except Exception, e:
print >>sys.stderr, "fork() failed: %s" % e
traceback.print_exc()
raise
fork()
os.umask(0)
os.chdir("/")
os.setsid()
fork()
devnull = open("/dev/null", "r")
os.dup2(devnull.fileno(), sys.stdin.fileno())
devnull = open("/dev/null", "aw")
os.dup2(devnull.fileno(), sys.stdout.fileno())
os.dup2(devnull.fileno(), sys.stderr.fileno())
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
# Server XMLRPC from any HTTP POST path #####################################
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = []
# SimpleXMLRPCServer with SO_REUSEADDR ######################################
class Server(SimpleXMLRPCServer):
def __init__(self, ip, port):
SimpleXMLRPCServer.__init__(self, (ip, port), requestHandler=RequestHandler)
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
SimpleXMLRPCServer.server_bind(self)
# This is a hack to patch slow socket.getfqdn calls that
# BaseHTTPServer (and its subclasses) make.
# See: http://bugs.python.org/issue6085
# See: http://www.answermysearches.com/xmlrpc-server-slow-in-python-how-to-fix/2140/
import BaseHTTPServer
def _bare_address_string(self):
host, port = self.client_address[:2]
return '%s' % host
BaseHTTPServer.BaseHTTPRequestHandler.address_string = \
_bare_address_string
# Given an implementation, serve requests forever ###########################
def start(impl, ip, port, daemon):
if daemon:
log("daemonising")
daemonize()
log("will listen on %s:%d" % (ip, port))
server = Server(ip, port)
log("server registered on %s:%d" % (ip, port))
server.register_introspection_functions() # for debugging
server.register_instance(Marshall(impl))
log("serving requests forever")
server.serve_forever()
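# Minimal usage sketch (added; the backend class below is an assumption, not part
# of this helper module). A storage backend implements the methods dispatched by
# Marshall and then calls start():
#
#   class NullImplementation:
#       def query(self):
#           return {"name": "null", "vendor": "example", "features": []}
#       def sr_attach(self, task, sr, device_config): pass
#       def sr_detach(self, task, sr): pass
#       # ... VDI.create, VDI.attach, etc. as needed
#
#   start(NullImplementation(), "0.0.0.0", 8080, daemon=False)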
| lgpl-2.1 | -7,150,863,785,080,457,000 | 31.754325 | 105 | 0.570251 | false |
Oksisane/RSS-Bot | Trolly-master/trolly/board.py | 1 | 5227 | """
Created on 8 Nov 2012
@author: plish
"""
from trolly.trelloobject import TrelloObject
class Board(TrelloObject):
"""
Class representing a Trello Board
"""
def __init__(self, trello_client, board_id, name=''):
super(Board, self).__init__(trello_client)
self.id = board_id
self.name = name
self.base_uri = '/boards/' + self.id
def get_board_information(self, query_params=None):
"""
Get all information for this board. Returns a dictionary of values.
"""
return self.fetch_json(
uri_path='/boards/' + self.id,
query_params=query_params or {}
)
def get_lists(self):
"""
Get the lists attached to this board. Returns a list of List objects.
"""
lists = self.get_lists_json(self.base_uri)
lists_list = []
for list_json in lists:
lists_list.append(self.create_list(list_json))
return lists_list
def get_cards(self):
"""
Get the cards for this board. Returns a list of Card objects.
"""
cards = self.get_cards_json(self.base_uri)
cards_list = []
for card_json in cards:
cards_list.append(self.create_card(card_json))
return cards_list
def get_card(self, card_id):
"""
Get a Card for a given card id. Returns a Card object.
"""
card_json = self.fetch_json(
uri_path=self.base_uri + '/cards/' + card_id
)
return self.create_card(card_json)
def get_members(self):
"""
Get Members attached to this board. Returns a list of Member objects.
"""
members = self.get_members_json(self.base_uri)
members_list = []
for member_json in members:
members_list.append(self.create_member(member_json))
return members_list
def get_organisation(self):
"""
Get the Organisation for this board. Returns Organisation object.
"""
organisation_json = self.get_organisations_json(self.base_uri)
return self.create_organisation(organisation_json)
def update_board(self, query_params=None):
"""
Update this board's information. Returns a new board.
"""
board_json = self.fetch_json(
uri_path=self.base_uri,
http_method='PUT',
query_params=query_params or {}
)
return self.create_board(board_json)
def add_list(self, query_params=None):
"""
Create a list for a board. Returns a new List object.
"""
list_json = self.fetch_json(
uri_path=self.base_uri + '/lists',
http_method='POST',
query_params=query_params or {}
)
return self.create_list(list_json)
def add_member_by_id(self, member_id, membership_type='normal'):
"""
Add a member to the board using the id. Membership type can be
normal or admin. Returns JSON of all members if successful or raises an
Unauthorised exception if not.
"""
return self.fetch_json(
uri_path=self.base_uri + '/members/%s' % member_id,
http_method='PUT',
query_params={
'type': membership_type
}
)
def add_member(self, email, fullname, membership_type='normal'):
"""
Add a member to the board. Membership type can be normal or admin.
Returns JSON of all members if successful or raises an Unauthorised
exception if not.
"""
return self.fetch_json(
uri_path=self.base_uri + '/members',
http_method='PUT',
query_params={
'email': email,
'fullName': fullname,
'type': membership_type
}
)
def remove_member(self, member_id):
"""
Remove a member from the organisation.Returns JSON of all members if
successful or raises an Unauthorised exception if not.
"""
return self.fetch_json(
uri_path=self.base_uri + '/members/%s' % member_id,
http_method='DELETE'
)
# Deprecated
def getBoardInformation(self, query_params=None):
return self.get_board_information(query_params)
def getLists(self):
return self.get_lists()
def getCards(self):
return self.get_cards()
def getCard(self, card_id):
return self.get_card(card_id)
def getMembers(self):
return self.get_members()
def getOrganisation(self):
return self.get_organisation()
def updateBoard(self, query_params=None):
return self.update_board(query_params)
def addList(self, query_params=None):
return self.add_list(query_params)
def addMemberById(self, member_id, membership_type='normal'):
return self.add_member_by_id(member_id, membership_type)
def addMember(self, email, fullname, membership_type='normal'):
return self.add_member(email, fullname, membership_type)
def removeMember(self, member_id):
return self.remove_member(member_id)
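# Brief usage sketch (added; constructing the client is an assumption based on the
# wider Trolly API, not something defined in this file):
#
#   client = trolly.client.Client(api_key, user_auth_token)
#   board = Board(client, '4d5ea62fd76aa1136000000c')
#   for lst in board.get_lists():
#       print(lst.name)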
| gpl-3.0 | -6,671,452,477,042,218,000 | 27.71978 | 79 | 0.579682 | false |
nicolasdespres/hunittest | hunittest/test/test_stopwatch.py | 1 | 2631 | # -*- encoding: utf-8 -*-
"""Test 'stopwath' module.
"""
import unittest
import time
from datetime import timedelta
from hunittest.stopwatch import StopWatch
class TestStopWatch(unittest.TestCase):
def assertTimedeltaAlmostEqual(self, td1, td2, prec=1e-3):
return abs((td1 - td2).total_seconds()) <= prec
def test_is_started(self):
sw = StopWatch()
self.assertFalse(sw.is_started)
sw.start()
self.assertTrue(sw.is_started)
sw.reset()
self.assertFalse(sw.is_started)
def test_split(self):
sw = StopWatch()
sw.start()
self.assertEqual(0, sw.splits_count)
### split 1
delay1 = 0.5
time.sleep(delay1)
sw.split()
self.assertEqual(1, sw.splits_count)
self.assertAlmostEqual(delay1,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1,
sw.mean_split_time.total_seconds(),
places=1)
self.assertEqual(sw.last_split_time, sw.total_split_time)
### split 1
delay2 = 1.0
time.sleep(delay2)
sw.split()
self.assertEqual(2, sw.splits_count)
self.assertAlmostEqual(delay2,
sw.last_split_time.total_seconds(),
places=1)
self.assertAlmostEqual((delay1 + delay2) / 2,
sw.mean_split_time.total_seconds(),
places=1)
self.assertAlmostEqual(delay1 + delay2,
sw.total_split_time.total_seconds(),
places=1)
def test_total_time(self):
sw = StopWatch()
sw.start()
delay = 0.5
time.sleep(delay)
self.assertAlmostEqual(delay, sw.total_time.total_seconds(), places=1)
def test_split_raises_if_not_started(self):
sw = StopWatch()
with self.assertRaises(RuntimeError):
sw.split()
def test_start_raises_if_already_started(self):
sw = StopWatch()
sw.start()
with self.assertRaises(RuntimeError):
sw.start()
def test_total_split_time_is_zero_when_not_started(self):
self.assertEqual(timedelta(0), StopWatch().total_split_time)
def test_mean_split_time_is_zero_when_not_started(self):
self.assertEqual(timedelta(0), StopWatch().mean_split_time)
def test_total_time_is_zero_when_not_started(self):
self.assertEqual(timedelta(0), StopWatch().total_time)
| bsd-2-clause | 5,127,935,866,404,837,000 | 31.085366 | 78 | 0.558723 | false |
Lukasa/testifi | testifi/server.py | 1 | 1038 | # -*- coding: utf-8 -*-
"""
testifi.server
~~~~~~~~~~~~~~
The primary server module in testifi.
"""
import sys
import structlog
from twisted.web import server, resource
from twisted.internet import reactor
from twisted.internet.defer import DeferredQueue
from twisted.internet.task import LoopingCall
from twisted.python.log import startLogging
from testifi.resources.test_collection import TestCollectionResource
from testifi.supervisor import Supervisor
def printer(x):
print x
def runServer():
structlog.configure(
processors=[structlog.twisted.EventAdapter()],
logger_factory=structlog.twisted.LoggerFactory(),
)
startLogging(sys.stdout)
sup = Supervisor(DeferredQueue())
root = resource.Resource()
root.putChild('tests', TestCollectionResource(sup))
release_poll = LoopingCall(sup.pollForNewReleases)
release_poll.start(300)
sup.testLoop()
site = server.Site(root)
reactor.listenTCP(8080, site)
reactor.run()
if __name__ == '__main__':
runServer()
| mit | -7,580,820,496,910,374,000 | 21.085106 | 68 | 0.716763 | false |
trolldbois/ctypeslib | test/util.py | 1 | 5667 | # From clang/bindings/python/cindex/test
# This file provides common utility functions for the test suite.
#
import ctypes
import os
from io import StringIO
from ctypes import RTLD_GLOBAL
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
import unittest
from ctypeslib.codegen import clangparser, codegenerator, config
from ctypeslib.codegen import util as codegen_util
from ctypeslib.library import Library
import tempfile
def mktemp(suffix):
handle, fnm = tempfile.mkstemp(suffix)
os.close(handle)
return fnm
class ClangTest(unittest.TestCase):
namespace = None
text_output = None
full_parsing_options = False
def _gen(self, ofi, fname, flags=None, dlls=None):
"""Take a file input and generate the code.
"""
cfg = config.CodegenConfig()
flags = flags or []
dlls = [Library(name, nm="nm") for name in dlls]
# leave the new parser accessible for tests
self.parser = clangparser.Clang_Parser(flags)
if self.full_parsing_options:
self.parser.activate_macros_parsing()
self.parser.activate_comment_parsing()
with open(fname):
pass
self.parser.parse(fname)
items = self.parser.get_result()
# gen code
cfg.searched_dlls = dlls
cfg.clang_opts = flags
gen = codegenerator.Generator(ofi, cfg=cfg)
gen.generate_headers(self.parser)
gen.generate_code(items)
return gen
def gen(self, fname, flags=None, dlls=[], debug=False):
"""Take a file input and generate the code.
"""
flags = flags or []
dlls = dlls or []
ofi = StringIO()
gen = self._gen(ofi, fname, flags=flags, dlls=dlls)
# load code
namespace = {}
# DEBUG
# print ofi.getvalue()
# DEBUG
ofi.seek(0)
ignore_coding = ofi.readline()
# exec ofi.getvalue() in namespace
output = ''.join(ofi.readlines())
self.text_output = output
try:
# PY3 change
exec(output, namespace)
except Exception:
print(output)
raise
# except NameError:
# print(output)
self.namespace = codegen_util.ADict(namespace)
if debug:
print(output)
return
def convert(self, src_code, flags=[], dlls=[], debug=False):
"""Take a string input, write it into a temp file and the code.
"""
# This seems a bit redundant, when util.get_tu() exists.
hfile = mktemp(".h")
with open(hfile, "w") as f:
f.write(src_code)
try:
self.gen(hfile, flags, dlls, debug)
finally:
os.unlink(hfile)
return
def _get_target_with_struct_hack(self, name):
""" because we rename "struct x" to struct_x, we have to reverse that
"""
target = codegen_util.get_cursor(self.parser.tu, name)
if target is None:
target = codegen_util.get_cursor(self.parser.tu, name.replace('struct_', ''))
if target is None:
target = codegen_util.get_cursor(self.parser.tu, name.replace('union_', ''))
return target
def assertSizes(self, name):
""" Compare size of records using clang sizeof versus python sizeof."""
target = self._get_target_with_struct_hack(name)
self.assertTrue(
target is not None,
'%s was not found in source' %
name)
_clang = target.type.get_size()
_python = ctypes.sizeof(getattr(self.namespace, name))
self.assertEqual(_clang, _python,
'Sizes for target: %s Clang:%d Python:%d flags:%s' % (name, _clang,
_python, self.parser.flags))
return
def assertOffsets(self, name):
""" Compare offset of records' fields using clang offsets versus
python offsets.
name: the name of the structure.
        The discovery of member fields and the comparison of their offsets is automatic.
"""
target = self._get_target_with_struct_hack(name)
target = target.type.get_declaration()
self.assertTrue(
target is not None,
'%s was not found in source' %
name)
members = [(c.displayname, c) for c in target.type.get_fields()]
_clang_type = target.type
_python_type = getattr(self.namespace, name)
        # let's handle bitfields - precalculate offsets
fields_offsets = dict()
for field_desc in _python_type._fields_:
_n = field_desc[0]
_f = getattr(_python_type, _n)
bfield_bits = _f.size >> 16
if bfield_bits:
ofs = 8 * _f.offset + _f.size & 0xFFFF
else:
ofs = 8 * _f.offset
# base offset
fields_offsets[_n] = ofs
# now use that
for i, (membername, field) in enumerate(members):
# anonymous fields
if membername == '':
membername = '_%d' % i
# _c_offset = _clang_type.get_offset(member)
_c_offset = field.get_field_offsetof()
# _p_offset = 8*getattr(_python_type, member).offset
_p_offset = fields_offsets[membername]
self.assertEqual(_c_offset, _p_offset,
'Offsets for target: %s.%s Clang:%d Python:%d flags:%s' % (
name, membername, _c_offset, _p_offset, self.parser.flags))
return
__all__ = [
]
| mit | 646,252,004,290,654,700 | 33.554878 | 107 | 0.562908 | false |
zevanzhao/TCCL-Code | ADF/ADFDFTB2xyz.py | 1 | 1334 | #!/usr/bin/env python
#Time-stamp: <Last updated: Zhao,Yafan [email protected] 2013-11-25 20:20:08>
"""
A script to get the optimized geometry from an ADF DFTB calculation output file.
"""
import sys, re
if (len(sys.argv) < 2):
print "Usage: ADFDFTB2xyz.py [adf.out]"
exit(0)
ADFOUT = sys.argv[1]
inp = open(ADFOUT, "r")
outlines = inp.readlines()
#Search for the geometry section
start = 0
end = 0
i = 0
for line in outlines:
if (re.match(ur"^Geometry$", line)):
#print "Find start at line %d" %(i)
start = i
elif (re.match(ur"^Total Energy \(hartree\)", line)):
#print "Find end at line %d" %(i)
end = i+1
i += 1
i = 0
geolines = outlines[start:end]
#print "%s" % (geolines)
mid = 0
#Search for the geometry section in angstrom
for line in geolines:
if (re.search(ur"angstrom", line)):
mid = i+1
break
i += 1
angstromgeo = geolines[mid:]
#print "%s" % (angstromgeo)
#print the geometry
j = 0
xyzlines = ""
energy = 0
for line in angstromgeo:
array = line.split()
if ( len(array) == 5):
j += 1
xyzlines += "%s %s %s %s\n" % (array[1], array[2], array[3], array[4])
elif (re.match(ur"^Total Energy", line)):
energy = array[3]
movielines = ""
movielines += "%d\n%s\n%s" % (j, energy, xyzlines)
print "%s" % (movielines),
| gpl-3.0 | 2,415,750,034,143,794,000 | 25.68 | 85 | 0.60045 | false |
calee0219/Course | ML/lab2/lab2_vedio.py | 1 | 1755 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from scipy.spatial.distance import cosine as Cos
dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/' + \
'wine-quality/winequality-white.csv'
data = pd.read_csv(dataset_url, sep=';')
feature = normalize(data.iloc[:, :-2])
target = data.iloc[:, -1]
start = time.time()
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
start = time.time()
kf = KFold(n_splits=12)
total_rate = 0
for train_index, test_index in kf.split(data):
train_feature, test_feature = feature[train_index], feature[test_index]
train_target, test_target = target[train_index], target[test_index]
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(train_feature, train_target)
pred = nbr.predict(test_feature)
mx = confusion_matrix(test_target, pred)
total_rate += mx.trace()/mx.sum()
print(total_rate/12)
print("===== used %s seconds =====" % (time.time()-start))
# Re-run KNN using cosine distance
def cosDist(a, b):
return Cos(a, b)
start = time.time()
nbr = KNN(n_neighbors=11, algorithm='brute', metric=cosDist)
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
| mit | 4,068,913,304,894,074,000 | 30.339286 | 75 | 0.679202 | false |
henkelis/sonospy | web2py/applications/sonospy/models/menu.py | 1 | 1952 | # -*- coding: utf-8 -*-
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
response.title = request.application
response.subtitle = T('UPnP ControlPoint')
##########################################
## this is the main application menu
## add/remove items as required
##########################################
response.menu = [
[T('Index'), False,
URL(request.application,'default','index'), []],
]
##########################################
## this is here to provide shortcuts
## during development. remove in production
##########################################
response.menu_edit=[
[T('Edit'), False, URL('admin', 'default', 'design/%s' % request.application),
[
[T('Controller'), False,
URL('admin', 'default', 'edit/%s/controllers/%s.py' \
% (request.application,request.controller=='appadmin' and
'default' or request.controller))],
[T('View'), False,
URL('admin', 'default', 'edit/%s/views/%s' \
% (request.application,response.view))],
[T('Layout'), False,
URL('admin', 'default', 'edit/%s/views/layout.html' \
% request.application)],
[T('Stylesheet'), False,
URL('admin', 'default', 'edit/%s/static/base.css' \
% request.application)],
[T('DB Model'), False,
URL('admin', 'default', 'edit/%s/models/db.py' \
% request.application)],
[T('Menu Model'), False,
URL('admin', 'default', 'edit/%s/models/menu.py' \
% request.application)],
[T('Database'), False,
URL(request.application, 'appadmin', 'index')],
]
],
]
| gpl-3.0 | -4,725,703,634,237,785,000 | 36.538462 | 80 | 0.433402 | false |
mgramsay/PlasMarkov | tweet.py | 1 | 2137 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Martin Ramsay
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Handles the linking to Twitter.
"""
import codecs
import os
from time import gmtime, strftime
import tweepy
from secrets import C_KEY, C_SECRET, A_TOKEN, A_TOKEN_SECRET
def send(text):
"""
Post a message to Twitter.
"""
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
print 'Sending: ' + text
try:
api.update_status(text)
except tweepy.error.TweepError as err:
print err.message
return err.message
else:
return 'Tweeted: ' + text
def log(message, logfile_name):
"""
Update the log file.
"""
path = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
with codecs.open(os.path.join(path, logfile_name), mode='a+',
encoding='utf-8') as logfile:
logtime = strftime('%d %b %Y %H:%M:%S', gmtime())
logfile.write(logtime + (' ' + message + '\n').decode('utf-8'))
| mit | 4,053,133,167,695,728,600 | 35.220339 | 79 | 0.680861 | false |
dataforimpact/veliquest | scrapers/v1-plus-local-storage/jcdecaux-scraper.py | 1 | 3422 | import os
import sys
import requests
import json
import datetime as dt
from boto.s3.connection import S3Connection, Location
from boto.s3.key import Key
def unsafe_getenviron(k):
v = os.environ.get(k)
if(v):
return v
else:
raise Exception('environment variable %s not set' % k)
JC_DECAUX_API_KEY = unsafe_getenviron('JC_DECAUX_API_KEY')
AWS_SECRET_KEY = unsafe_getenviron('AWS_SECRET_KEY')
AWS_ACCESS_KEY = unsafe_getenviron('AWS_ACCESS_KEY')
VELIQUEST_BUCKET = unsafe_getenviron('VELIQUEST_BUCKET')
# initiate S3 connection
s3conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
dfibucket = s3conn.get_bucket(VELIQUEST_BUCKET)#, location=Location.EU)
# fetch the full station list from the JCDecaux API
def getjcdecaux_data_as_json():
try:
all_stations_r = requests.get('https://api.jcdecaux.com/vls/v1/stations', params={'apiKey': JC_DECAUX_API_KEY})
status = all_stations_r.status_code
if (status == 200):
json_data = all_stations_r.json()
return status, json_data
elif (status == 403):
raise Exception("%s apiKey for JCDecaux is not valid" % JC_DECAUX_API_KEY)
elif (status == 500):
raise Exception("JCDecaux Server Error")
else:
raise Exception("JCDecaux Server Error")
except Exception as e:
raise e
def parse_station(s):
"""Outputs a single line with (comma serpated) values of
[contract_name, number, status, bike_stands, available_bike_stands, available_bikes, last_update]
Note : status is 1 when "OPEN" and 0 when "CLOSED" to reduce bytes # per station
"""
keys = ['contract_name', 'number', 'status', 'bike_stands', 'available_bike_stands', 'available_bikes', 'last_update']
line_vals = [str(s[k]) if (k!='status') else ("1" if (s[k]=='OPEN') else "0")
for k in keys]
return ",".join(line_vals)
def parse_stations(stations_json):
lines_arr = [parse_station(s) for s in stations_json]
return '\n'.join(lines_arr)
def filename_from_date(dte):
return dte.strftime("%Hh%Mm%S_%f.csv")
def dirpath_from_date(dte):
return dte.strftime("%Y/%m/%d/")
def s3_key(dirpath, filename):
return "/veliquest/jcdecaux/prod/v1/" + dirpath + filename
def store_stations_in_s3(dirpath, filename, content):
k = Key(dfibucket)
k.key = s3_key(dirpath, filename)
k.set_contents_from_string(content)
return k.key
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def store_stations_locally(absdir, filename, content):
ensure_dir(absdir)
fpath = absdir + filename
with open(fpath, 'w') as f:
f.write(content)
return fpath
if not len(sys.argv)==2:
print "Pass abs path of directory in sync with S3"
exit()
if not sys.argv[1][-1] == "/":
print "arg must be a directory (does not end with /)"
exit()
if not sys.argv[1][0] == "/":
print "arg must be abs directory (does not start with /)"
exit()
print "Executing Request..."
status, json_data = getjcdecaux_data_as_json()
if (status==200):
print "Done (200)"
print "Parsing stations data..."
csv_lines = parse_stations(json_data)
dte = dt.datetime.utcnow()
dirpath, filename = dirpath_from_date(dte), filename_from_date(dte)
print "Storing to S3..."
s3_key = store_stations_in_s3(dirpath, filename, csv_lines)
print "S3 stored in %s at %s" % (VELIQUEST_BUCKET, s3_key)
print "Storing locally..."
base_dir = sys.argv[1]
abs_dir = base_dir + dirpath
local_path = store_stations_locally(abs_dir, filename, csv_lines)
print "Locally stored in %s" % local_path
| mit | 4,501,067,044,483,126,000 | 25.944882 | 119 | 0.697545 | false |
dataunit/dataunit | dataunit/case.py | 1 | 1428 | import unittest
from dataunit.context import Context, get_global_context
class DataUnitTestCase(unittest.TestCase):
"""A class defining a single DataUnit tests case.
This class is designed to be instantiated with a
list of TestCommand instances which define the
behavior of this tests case.
:note: This class will be executed as a test case by PyCharm. It should pass
due to an empty test_command list default.
:param test_commands: List of TestCommand instances used to execute
the tests case.
"""
# noinspection PyPep8Naming
def __init__(self, methodName='runTest', test_commands: list=[], global_context: Context=None):
# Validate Params
if test_commands is None:
raise ValueError('Parameter test_commands must not be None.')
for command in test_commands:
if not hasattr(command, 'run'):
raise ValueError('Parameter test_commands must be list of runnable objects')
# Call unittest.TestCase.__init__() to setup default behavior.
super().__init__(methodName)
# Set attributes on self.
self.test_commands = test_commands
self.global_context = global_context
def runTest(self):
"""Execute the actual tests case
"""
test_context = Context(parent=self.global_context)
for cmd in self.test_commands:
cmd.run(test_context)
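# Illustrative sketch of a minimal command object accepted by
# DataUnitTestCase (EchoCommand is an assumption invented for the example;
# any object exposing a run(context) method satisfies the check in __init__):
#
#   class EchoCommand(object):
#       def run(self, context):
#           pass
#
#   case = DataUnitTestCase(test_commands=[EchoCommand()])
#   case.runTest()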
| apache-2.0 | -203,158,893,744,653,540 | 33.829268 | 99 | 0.663866 | false |
gavinfish/leetcode-share | python/065 Valid Number.py | 1 | 1907 | '''
Validate if a given string is numeric.
Some examples:
"0" => true
" 0.1 " => true
"abc" => false
"1 a" => false
"2e10" => true
Note: It is intended for the problem statement to be ambiguous. You should gather all requirements up front before implementing one.
'''
class Solution(object):
def isNumber(self, s):
"""
:type s: str
:rtype: bool
"""
s = s.strip()
length = len(s)
index = 0
# Deal with symbol
if index < length and (s[index] == '+' or s[index] == '-'):
index += 1
is_normal = False
is_exp = True
# Deal with digits in the front
while index < length and s[index].isdigit():
is_normal = True
index += 1
# Deal with dot ant digits behind it
if index < length and s[index] == '.':
index += 1
while index < length and s[index].isdigit():
is_normal = True
index += 1
# Deal with 'e' and number behind it
if is_normal and index < length and (s[index] == 'e' or s[index] == 'E'):
index += 1
is_exp = False
if index < length and (s[index] == '+' or s[index] == '-'):
index += 1
while index < length and s[index].isdigit():
index += 1
is_exp = True
# Return true only deal with all the characters and the part in front of and behind 'e' are all ok
return is_normal and is_exp and index == length
if __name__ == "__main__":
assert Solution().isNumber("3.e-23") == True
assert Solution().isNumber(".2e81") == True
assert Solution().isNumber("2e10") == True
assert Solution().isNumber(" 0.1") == True
assert Solution().isNumber("1 b") == False
assert Solution().isNumber("3-2") == False
assert Solution().isNumber("abc") == False | mit | -3,100,469,301,061,550,000 | 32.473684 | 132 | 0.528579 | false |
phiros/nepi | src/nepi/resources/omf/interface.py | 1 | 11938 | #
# NEPI, a framework to manage network experiments
# Copyright (C) 2013 INRIA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alina Quereilhac <[email protected]>
# Julien Tribino <[email protected]>
import os, time
from nepi.util.timefuncs import tnow
from nepi.execution.resource import ResourceManager, clsinit_copy, \
ResourceState
from nepi.execution.attribute import Attribute, Flags
from nepi.resources.omf.node import OMFNode, confirmation_counter, reschedule_check
from nepi.resources.omf.omf_resource import ResourceGateway, OMFResource
from nepi.resources.omf.channel import OMFChannel
from nepi.resources.omf.omf_api_factory import OMFAPIFactory
@clsinit_copy
class OMFWifiInterface(OMFResource):
"""
.. class:: Class Args :
:param ec: The Experiment controller
:type ec: ExperimentController
:param guid: guid of the RM
:type guid: int
"""
_rtype = "omf::WifiInterface"
_authorized_connections = ["omf::Node" , "omf::Channel", "wilabt::sfa::Node"]
@classmethod
def _register_attributes(cls):
"""Register the attributes of an OMF interface
"""
name = Attribute("name","Alias of the interface : wlan0, wlan1, ..", default = "wlan0")
mode = Attribute("mode","Mode of the interface")
hw_mode = Attribute("hw_mode","Choose between : a, b, g, n")
essid = Attribute("essid","Essid of the interface")
ip = Attribute("ip","IP of the interface")
cls._register_attribute(name)
cls._register_attribute(mode)
cls._register_attribute(hw_mode)
cls._register_attribute(essid)
cls._register_attribute(ip)
def __init__(self, ec, guid):
"""
:param ec: The Experiment controller
:type ec: ExperimentController
:param guid: guid of the RM
:type guid: int
:param creds: Credentials to communicate with the rm (XmppClient for OMF)
:type creds: dict
"""
super(OMFWifiInterface, self).__init__(ec, guid)
self._conf = False
self.alias = None
self._type = None
self.create_id = None
self._create_cnt = 0
self.release_id = None
self._release_cnt = 0
self._topic_iface = None
self._omf_api = None
self._type = ""
# For performance tests
self.perf = True
self.begin_deploy_time = None
def valid_connection(self, guid):
""" Check if the connection with the guid in parameter is possible.
Only meaningful connections are allowed.
:param guid: Guid of the current RM
:type guid: int
:rtype: Boolean
"""
rm = self.ec.get_resource(guid)
if rm.get_rtype() in self._authorized_connections:
msg = "Connection between %s %s and %s %s accepted" % \
(self.get_rtype(), self._guid, rm.get_rtype(), guid)
self.debug(msg)
return True
msg = "Connection between %s %s and %s %s refused" % \
(self.get_rtype(), self._guid, rm.get_rtype(), guid)
self.debug(msg)
return False
@property
def exp_id(self):
return self.ec.exp_id
@property
def node(self):
rm_list = self.get_connected(OMFNode.get_rtype())
if rm_list: return rm_list[0]
return None
@property
def channel(self):
rm_list = self.get_connected(OMFChannel.get_rtype())
if rm_list: return rm_list[0]
return None
def configure_iface(self):
""" Configure the interface without the ip
"""
if self.node.state < ResourceState.READY:
self.ec.schedule(self.reschedule_delay, self.deploy)
return False
for attrname in ["mode", "type", "essid"]:
if attrname == "type" :
attrval = self._type
else :
attrval = self.get(attrname)
attrname = "net/%s/%s" % (self.alias, attrname)
self._omf_api.configure(self.node.get('hostname'), attrname,
attrval)
super(OMFWifiInterface, self).do_provision()
return True
def configure_ip(self):
""" Configure the ip of the interface
        .. note : The ip is configured separately from the other parameters to avoid
        a CELL ID sharing problem. By putting the ip at the end of the configuration,
        each node uses the same channel and can then share the same CELL ID.
        Otherwise, the channel is defined at the end and the nodes don't
        share a common CELL ID and cannot communicate.
"""
if self.channel.state < ResourceState.READY:
self.ec.schedule(self.reschedule_delay, self.deploy)
return False
attrval = self.get("ip")
if '/' in attrval:
attrval,mask = attrval.split('/')
attrname = "net/%s/%s" % (self.alias, "ip")
self._omf_api.configure(self.node.get('hostname'), attrname,
attrval)
return True
def configure_on_omf5(self):
""" Method to configure the wifi interface when OMF 5.4 is used.
"""
self._type = self.get('hw_mode')
if self.get('name') == "wlan0" or "eth0":
self.alias = "w0"
else:
self.alias = "w1"
res = False
if self.state < ResourceState.PROVISIONED:
if self._conf == False:
self._conf = self.configure_iface()
if self._conf == True:
res = self.configure_ip()
return res
def check_deploy(self, cid):
""" Check, through the mail box in the parser,
if the confirmation of the creation has been received
:param cid: the id of the original message
        :type cid: string
"""
uid = self._omf_api.check_mailbox("create", cid)
if uid :
return uid
return False
def do_deploy(self):
""" Deploy the RM. It means : Get the xmpp client and send messages
using OMF 5.4 or 6 protocol to configure the interface.
"""
if not self.node or self.node.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- node state %s "
% self.node.state )
self.ec.schedule(self.reschedule_delay, self.deploy)
return
if not self.channel or self.channel.state < ResourceState.READY:
self.debug("---- RESCHEDULING DEPLOY ---- channel state %s "
% self.channel.state )
self.ec.schedule(self.reschedule_delay, self.deploy)
return
## For performance test
if self.perf:
self.begin_deploy_time = tnow()
self.perf = False
self.set('xmppUser',self.node.get('xmppUser'))
self.set('xmppServer',self.node.get('xmppServer'))
self.set('xmppPort',self.node.get('xmppPort'))
self.set('xmppPassword',self.node.get('xmppPassword'))
self.set('version',self.node.get('version'))
if not self.get('xmppServer'):
msg = "XmppServer is not initialzed. XMPP Connections impossible"
self.error(msg)
raise RuntimeError, msg
if not (self.get('xmppUser') or self.get('xmppPort')
or self.get('xmppPassword')):
msg = "Credentials are not all initialzed. Default values will be used"
self.warn(msg)
if not self._omf_api :
self._omf_api = OMFAPIFactory.get_api(self.get('version'),
self.get('xmppServer'), self.get('xmppUser'), self.get('xmppPort'),
self.get('xmppPassword'), exp_id = self.exp_id)
if not (self.get('name')):
msg = "Interface's name is not initialized"
self.error(msg)
raise RuntimeError, msg
if not (self.get('mode') and self.get('essid') \
and self.get('hw_mode') and self.get('ip')):
msg = "Interface's variable are not initialized"
self.error(msg)
raise RuntimeError, msg
if self.get('version') == "5":
res = self.configure_on_omf5()
else :
res = self.configure_on_omf6()
if res:
super(OMFWifiInterface, self).do_deploy()
def configure_on_omf6(self):
""" Method to configure the wifi interface when OMF 6 is used.
"""
if not self.create_id :
props = {}
props['wlan:if_name'] = self.get('name')
props['wlan:mode'] = {
"mode": self.get('mode'),
"hw_mode" : self.get('hw_mode'),
"channel" : self.channel.get('channel'),
"essid" : self.get('essid'),
"ip_addr" : self.get('ip'),
"frequency" : self.channel.frequency,
"phy" : "%0%"
}
props['wlan:hrn'] = self.get('name')
props['wlan:type'] = "wlan"
self.create_id = os.urandom(16).encode('hex')
self._omf_api.frcp_create( self.create_id, self.node.get('hostname'), "wlan", props = props)
if self._create_cnt > confirmation_counter:
msg = "Couldn't retrieve the confirmation of the creation"
self.error(msg)
raise RuntimeError, msg
uid = self.check_deploy(self.create_id)
if not uid:
self._create_cnt +=1
self.ec.schedule(reschedule_check, self.deploy)
return False
self._topic_iface = uid
self._omf_api.enroll_topic(self._topic_iface)
return True
def check_release(self, cid):
""" Check, through the mail box in the parser,
if the confirmation of the release has been received
:param cid: the id of the original message
        :type cid: string
"""
res = self._omf_api.check_mailbox("release", cid)
if res :
return res
return False
def do_release(self):
""" Clean the RM at the end of the experiment and release the API
"""
if self._omf_api:
if self.get('version') == "6" and self._topic_iface :
if not self.release_id:
self.release_id = os.urandom(16).encode('hex')
self._omf_api.frcp_release( self.release_id, self.node.get('hostname'),self._topic_iface, res_id=self._topic_iface)
if self._release_cnt < confirmation_counter:
cid = self.check_release(self.release_id)
if not cid:
self._release_cnt +=1
self.ec.schedule(reschedule_check, self.release)
return
else:
msg = "Couldn't retrieve the confirmation of the release"
self.error(msg)
OMFAPIFactory.release_api(self.get('version'),
self.get('xmppServer'), self.get('xmppUser'), self.get('xmppPort'),
self.get('xmppPassword'), exp_id = self.exp_id)
super(OMFWifiInterface, self).do_release()
| gpl-3.0 | 7,847,344,216,218,451,000 | 34.111765 | 135 | 0.570028 | false |
AntreasAntoniou/DeepClassificationBot | deepanimebot/bot.py | 1 | 7412 | # -*- coding: utf-8 -*-
'''
Twitter bot who replies with the best guesses of
what a @mention'ed image is.
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import functools
import logging
import os
import random
import time
import tweepy
import deploy
import gceutil
from deepanimebot import classifiers
from deepanimebot import exceptions as exc
from deepanimebot import messages
INPUT_SHAPE = 128 # change it to your input image size
TWEET_MAX_LENGTH = 140
logging.basicConfig()
logger = logging.getLogger('bot')
logger.setLevel(logging.INFO)
def wait_like_a_human(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
start = time.time()
rv = f(*args, **kwargs)
if not rv:
return
api, action, args, kwargs = rv
end = start + random.randint(1, 5)
sleep = end - time.time()
if sleep > 0:
time.sleep(sleep)
return getattr(api, action)(*args, **kwargs)
return wrapper
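# Minimal sketch of the contract expected by wait_like_a_human (illustrative;
# `post_greeting` and the tweet text are assumptions, not part of this bot):
# a decorated method returns an (api, method_name, args, kwargs) tuple, and
# the wrapper sleeps 1-5 seconds before dispatching the call on the api.
#
#   @wait_like_a_human
#   def post_greeting(api):
#       return api, 'update_status', ('hello',), {}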
class ReplyToTweet(tweepy.StreamListener):
def __init__(self, screen_name, classifier, api=None, silent=False):
super(ReplyToTweet, self).__init__(api)
self.screen_name = screen_name
self.classifier = classifier
self.silent = silent
@wait_like_a_human
def on_direct_message(self, data):
status = data.direct_message
sender_name = status['sender']['screen_name']
if sender_name == self.screen_name:
return
logger.debug(u"{0} incoming dm {1}".format(status['id'], status['text']))
reply = self.get_reply(status['id'], status['entities'], TWEET_MAX_LENGTH - len('d {} '.format(sender_name)), messages.DMMessages)
if self.silent:
return
return self.api, 'send_direct_message', tuple(), dict(user_id=status['sender']['id'], text=reply)
@wait_like_a_human
def on_status(self, status):
sender_name = status.author.screen_name
if sender_name == self.screen_name:
return
logger.debug(u"{0} incoming status {1}".format(status.id, status.text))
if retweets_me(status, self.screen_name):
logger.debug("{0} is a retweet".format(status.id))
return
if not status_mentions(status, self.screen_name):
logger.debug("{0} doesn't mention {1}".format(status.id, self.screen_name))
return
prefix = '@{0} '.format(sender_name)
reply = self.get_reply(status.id, status.entities, TWEET_MAX_LENGTH - len(prefix), messages.StatusMessages)
status_text = prefix + reply
if self.silent:
return
return self.api, 'update_status', (status_text,), dict(in_reply_to_status_id=status.id)
def get_reply(self, status_id, entities, max_length, messages):
maybe_image_url = url_from_entities(entities)
if not maybe_image_url:
logger.debug("{0} doesn't have a URL".format(status_id))
return messages.give_me_an_image()
try:
y = self.classifier.classify(url=maybe_image_url)
except exc.TimeoutError:
logger.debug("{0} timed out while classifying {1}".format(status_id, maybe_image_url))
return messages.took_too_long()
except exc.NotImage:
logger.debug("{0} no image found at {1}".format(status_id, maybe_image_url))
return messages.not_an_image()
except exc.RemoteError as e:
logger.debug("{0} remote error {1}".format(status_id, e))
return e.message
except Exception as e:
logger.error("{0} error while classifying {1}: {2}".format(status_id, maybe_image_url, e))
return messages.something_went_wrong()
reply = messages.my_guess(y, max_length)
logger.debug("{0} reply: {1}".format(status_id, reply))
return reply
def on_error(self, status):
if status == 420:
# we are rate-limited.
# returning False disconnects the stream
return False
def retweets_me(status, screen_name):
retweeted_status = getattr(status, 'retweeted_status', None)
if retweeted_status is None:
return False
return retweeted_status.author.screen_name == screen_name
def status_mentions(status, screen_name):
for mention in status.entities.get('user_mentions', []):
if mention['screen_name'] == screen_name:
return True
return False
def url_from_entities(entities):
for media in entities.get('media', []):
if media['type'] == 'photo':
return media['media_url']
for url in entities.get('urls', []):
return url['expanded_url']
def main(args):
if args.debug:
logger.setLevel(logging.DEBUG)
auth = tweepy.OAuthHandler(args.consumer_key, args.consumer_secret)
auth.set_access_token(args.access_token, args.access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
screen_name = api.me().screen_name
if args.classifier == 'mock':
classifier = classifiers.MockClassifier()
elif args.classifier == 'local':
classifier = classifiers.URLClassifier(classifiers.ImageClassifier(args.dataset_path, INPUT_SHAPE))
elif args.classifier == 'remote':
classifier = classifiers.RemoteClassifier(args.remote_endpoint)
stream = tweepy.Stream(auth=auth, listener=ReplyToTweet(screen_name, classifier, api, args.silent))
logger.info('Listening as {}'.format(screen_name))
stream.userstream(track=[screen_name])
if __name__ == '__main__':
import configargparse
parser = configargparse.getArgumentParser()
parser.add('-c', '--config', required=False, is_config_file=True, help='Config file path. See bot.ini.example')
parser.add('--consumer-key', required=True, env_var='CONSUMER_KEY', help='Twitter app consumer key')
parser.add('--consumer-secret', required=True, env_var='CONSUMER_SECRET', help='Twitter app consumer secret')
parser.add('--access-token', required=True, env_var='ACCESS_TOKEN', help='Twitter access token')
parser.add('--access-token-secret', required=True, env_var='ACCESS_TOKEN_SECRET', help='Twitter access token secret')
parser.add('--classifier', choices=['mock', 'local', 'remote'], default='mock', help='Which classifier to use')
parser.add('--dataset-path', default='data/data.hdf5', help='Path to dataset when using a local calssifier')
parser.add('--remote-endpoint', default=None, help='API endpoint to call when using a remote classifier')
parser.add('--silent', action='store_true', default=False, help='Run bot without actually replying')
parser.add('--debug', action='store_true', default=False, help='Set log level to debug')
try:
args = parser.parse_args()
except SystemExit as e:
if gceutil.detect_gce_environment(logger):
attrname_env_varnames = {action.dest.replace('_', '-'): action.env_var
for action in parser._actions if action.env_var}
metadata = gceutil.get_metadata(attrname_env_varnames.keys())
environ = dict(os.environ)
environ.update({attrname_env_varnames[attr]: value for attr, value in metadata.items()})
args = parser.parse_args(env_vars=environ)
else:
raise
main(args)
| mit | 5,811,651,441,929,854,000 | 35.875622 | 138 | 0.639234 | false |
grizmio/DictQueue | DictQueue/DictQueue.py | 1 | 4887 |
import asyncio
import time
from collections import OrderedDict
class KeyNotInDictException(Exception):
pass
class KeyVanishedException(Exception):
pass
class AlreadyWaitingForItException(Exception):
pass
class DictContainer:
def __init__(self, max_size):
self.__requested_keys = set()
self.__container = OrderedDict() # '' => (time.time(), obj)
self.__added_item = asyncio.Condition()
self.__size = 0
self.__max_size = max_size
async def __wait_for_put(self):
with await self.__added_item:
await self.__added_item.wait()
return True
def __get(self, key):
if key in self.__container:
try:
x = self.__container.pop(key)
self.__size -= 1
return x
except KeyError:
raise KeyVanishedException(key)
else:
raise KeyNotInDictException
async def get(self, key):
if key in self.__requested_keys:
raise AlreadyWaitingForItException(key)
self.__requested_keys.add(key)
x = None
while 1:
try:
x = self.__get(key)
except KeyVanishedException:
raise
except KeyNotInDictException:
pass
if isinstance(x, dict):
break
with await self.__added_item:
await self.__added_item.wait()
if key in self.__container:
try:
x = self.__get(key)
except KeyVanishedException:
raise
except KeyNotInDictException:
pass
else:
break
self.__requested_keys.remove(key)
return x
async def get_timeout(self, key, timeout):
if key in self.__requested_keys:
raise AlreadyWaitingForItException(key)
self.__requested_keys.add(key)
x = None
timeout_total = timeout
timeout_left = timeout_total
timeout_end = time.time() + timeout_total
while timeout_left > 0 and x is None:
try:
x = self.__get(key)
except KeyVanishedException:
raise
except KeyNotInDictException:
pass
if isinstance(x, dict):
break
try:
# siempre el maximo a esperar es el tiempo que queda de timeout: timeout_left
await asyncio.wait_for(self.__wait_for_put(), timeout=timeout_left)
except asyncio.TimeoutError:
print('Timeout :-(', key)
break
timeout_left = timeout_end - time.time()
self.__requested_keys.remove(key)
return x
async def put(self, m_key, item):
# __size empieza en 1
if self.__size > self.__max_size:
to_pop = self.__size - self.__max_size
self.__size -= to_pop
for k in list(self.__container)[:to_pop]:
                print('Removing:', k)
self.__container.pop(k)
self.__container[m_key] = item
self.__size += 1
with await self.__added_item:
try:
self.__added_item.notify_all()
except Exception as EE:
print('\n\n:--->>>> put()', EE, '\n\n')
if __name__ == '__main__':
# http://stackoverflow.com/questions/23864341/equivalent-of-asyncio-queues-with-worker-threads
# http://stackoverflow.com/questions/35796421/async-def-and-coroutines-whats-the-link
import random
q = DictContainer(3)
async def produce():
while 1:
print('Produciendo')
for c in '1234':
await q.put('action-'+c, {'muchainfo': [random.randint(0, 999) for r in range(10)]})
await asyncio.sleep(5.1 + random.random())
async def consume1():
while 1:
print('cosumiendo 1')
value = None
while value is None:
value = await q.get_timeout('action-1', 9)
print('consume1 value:', value)
await asyncio.sleep(0.15)
await asyncio.sleep(0.2 + random.random())
async def consume2():
while 1:
print('cosumiendo 2')
value = None
while value is None:
value = await q.get_timeout('action-2', 3)
print('consume2 value:', value)
await asyncio.sleep(0.15)
print("Consumed2: ", value)
await asyncio.sleep(0.2 + random.random())
loop = asyncio.get_event_loop()
loop.create_task(produce())
time.sleep(0.2)
loop.create_task(consume1())
loop.create_task(consume2())
loop.run_forever()
| mit | 1,505,946,286,195,720,000 | 28.089286 | 100 | 0.516268 | false |
ucsd-ccbb/Oncolist | src/server/Louvain/cluster_analysis_module.py | 1 | 30972 |
import pandas as pd
import numpy as np
import time, os, shutil, re, community
import networkx as nx
import matplotlib.pyplot as plt
import scipy.spatial.distance as ssd
import scipy.cluster.hierarchy as sch
# import cluster_connect module
import cluster_connect
"""
-------------------------------------------------------------
Author: Brin Rosenthal ([email protected])
-------------------------------------------------------------
"""
from Utils import HypergeomCalculator
from GO import GOLocusParser
from multiprocessing import Pool
from functools import partial
# import the automatic GO annotation tools NOTE: CHANGE THIS PATH!!
import sys
#sys.path.append('/Users/brin/Google_Drive/UCSD/cluster_code/go_annotation')
#from HypergeomCalculator import *
def import_TCGA_data(path_file):
'''
    function to import data and build the weighted edge list - returns the dataframe and edge list; input is the path to a tsv file
'''
D_df = pd.read_csv(path_file, sep='\t', names=['var1', 'var2', 'corr', 'p'])
nodes = np.union1d(D_df.var1, D_df.var2)
# don't need to make big network- takes a long time
edge_list_w = zip(list(D_df['var1']), list(D_df['var2']), list(np.abs(D_df['corr']))) # try using absolute value of correlations
return D_df, edge_list_w
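# Illustrative usage sketch (the path and the row below are assumptions, not
# data shipped with this module). The expected input is a 4-column TSV of
# var1 <tab> var2 <tab> corr <tab> p, one edge per line, e.g.:
#
#   hsa-mir-21    TP53    -0.42    1.3e-05
#
#   D_df, edge_list_w = import_TCGA_data('/path/to/mirna_vs_rnaseq.cor')
#   # edge_list_w is a list of (var1, var2, abs(corr)) tuples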
def find_edges_thresh(edge_list_total, edge_thresh=0, gamma=1, weight_flag='on'):
'''
    find edges > threshold and the corresponding list of nodes
    find edges with weights greater than a given threshold, the corresponding nodes,
    return edges, nodes, and graph constructed from these weighted edges and nodes
NOTE: gamma and edge_thresh were set after analysis of gamma_scan (see cfncluster_gamma_scan.py), to optimize modularity and overlap fraction, while maintaining a large enough number of groups 5 < size < 500
UPDATE 1/27/2016: edge_thresh and gamma defaults set to 0 and 1, respectively--> including clusters from multiple gammas
'''
if weight_flag == 'on':
elarge = [(u, v, d**gamma) for (u, v, d) in edge_list_total if d > edge_thresh]
#esmall=[(u,v,d) for (u,v,d) in edge_list_total if d['weight'] <=edge_thresh]
# what are the corresponding nodes?
nodetemp = []
[nodetemp.append(u) for (u, v, d) in elarge]
[nodetemp.append(v) for (u, v, d) in elarge]
else:
# if no weights, only return connecting nodes
elarge=[(u, v) for (u, v, d) in edge_list_total if d > edge_thresh]
# what are the corresponding nodes?
nodetemp = []
[nodetemp.append(u) for (u, v) in elarge]
[nodetemp.append(v) for (u, v) in elarge]
# how many edges in elarge?
print('there are ' + str(len(elarge)) + ' edges with weight greater than ' + str(edge_thresh))
nodetemp = pd.Series(nodetemp)
nodesmall = list(nodetemp.unique())
print('there are ' + str(len(nodesmall)) + ' corresponding nodes')
# make the graph from nodesmall and elarge
Gtemp = nx.Graph()
Gtemp.add_nodes_from(nodesmall)
Gtemp.add_weighted_edges_from(elarge)
return elarge, nodesmall, Gtemp
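# Illustrative follow-on to import_TCGA_data (the 0.5 threshold is an
# assumption chosen for the sketch; the defaults are edge_thresh=0, gamma=1):
#
#   elarge, nodesmall, Gtemp = find_edges_thresh(edge_list_w, edge_thresh=0.5)
#   # Gtemp is a weighted networkx Graph built only from the surviving edges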
def run_lancichinetti_clustering(Gtemp,data_path,code_path,results_folder,algorithm='louvain', num_c_reps = 2,remove_flag=True):
'''
This function calculates the clustering algorithm specified by 'algorithm'. The source code must be downloaded
and installed from https://sites.google.com/site/andrealancichinetti/software.
Note, the code failed out of the box. Had to change line 155 of 'wsarray.h'
to: 'pair<int, double> * ww = new pair<int, double> [_size_];'
See Lancichinetti's ReadMe doc for more info on how algorithms work
    beware: oslom algorithms are either VERY slow, or don't work at all
returns partition
'''
# check if Gtemp is bipartite
is_G_bipartite = nx.bipartite.is_bipartite(Gtemp)
if is_G_bipartite:
v1_nodes,v2_nodes = nx.bipartite.sets(Gtemp)
v1map = dict(zip(v1_nodes,range(len(v1_nodes))))
v2map = dict(zip(v2_nodes,range(len(v2_nodes))))
v_all_map = v1map.copy()
v_all_map.update(v2map)
else:
v_all_map = dict(zip(Gtemp.nodes(),range(len(Gtemp.nodes()))))
Gtemp_mapped = nx.relabel_nodes(Gtemp,v_all_map)
edge_list_mapped = nx.to_edgelist(Gtemp_mapped)
e1mapped,e2mapped,weight = zip(*edge_list_mapped)
weight_list = [x['weight'] for x in weight]
# pick the right algorithm
if algorithm=='oslom_undirected':
        # note: oslom is very slow
pnum=0
elif algorithm=='oslom_directed':
pnum=1
elif algorithm=='infomap_undirected':
pnum=2
elif algorithm=='infomap_directed':
pnum=3
elif algorithm=='louvain':
pnum=4
elif algorithm=='label_propagation':
pnum=5
elif algorithm=='hierarchical_infomap_undirected':
pnum=6
elif algorithm=='hierarchical_infomap_directed':
pnum=7
elif algorithm=='modularity_optimization':
pnum=8
edge_list_path = data_path[:-4] + '_edge_list.csv'
edge_list_df = pd.DataFrame({'v1':e1mapped,'v2':e2mapped,'weight':weight_list})
edge_list_df.to_csv(edge_list_path,sep=' ',index=False,header=False)
if remove_flag:
# check if the directory already exists, delete it if it does. Otherwise the code throws an error
if os.path.isdir(results_folder):
shutil.rmtree(results_folder)
command_line = "python " + code_path + " -n " + edge_list_path + " -p " + str(pnum) + " -f " +results_folder + " -c " + str(num_c_reps)
os.system(command_line)
# parse the results
partition = parse_results_lancichinetti(results_folder,algorithm=algorithm)
# translate back to correct ids
v_all_map_r = {v: k for k, v in v_all_map.items()}
# replace keys in partition
partition = dict(partition)
old_keys = partition.keys()
for old_key in old_keys:
new_key = v_all_map_r[old_key]
partition[new_key] = partition.pop(old_key)
partition = pd.Series(partition)
return partition
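# Illustrative call (all paths are assumptions for the sketch; the external
# clustering binaries must be installed separately, as described in the
# docstring above, and code_path must point at its select.py entry script):
#
#   partition = run_lancichinetti_clustering(
#       Gtemp,
#       data_path='/home/ec2-user/data/LIHC/mirna_vs_rnaseq.cor',
#       code_path='/home/ec2-user/code/clustering_programs_5_2/select.py',
#       results_folder='/home/ec2-user/results/results_louvain_temp',
#       algorithm='louvain')
#   # partition maps each original node label to a list of community ids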
def parse_results_lancichinetti(results_folder,algorithm='louvain'):
'''
This function parses the results from lancichinetti code (doesn't work for OSLOM algorithm yet...
have to decide what to do about non-unique community membership)
Returns pandas series object 'partition'
'''
results_file = results_folder + '/results_consensus/tp'
with open(results_file, "r") as ins:
group_id_dict = dict()
count = -1
for line in ins:
if (algorithm=='hierarchical_infomap_undirected') or (algorithm=='hierarchical_infomap_directed'):
count = count+1
# inconsistent file for this algorithm
line = re.split(r' ', line.rstrip(' '))
elif (algorithm=='oslom_undirected') or (algorithm=='oslom_directed'):
is_new_module = (line.find('module')>0)
if is_new_module:
count = count+1
else:
line = re.split(r' ', line.rstrip(' '))
else:
count = count+1
line = re.split(r'\t+', line.rstrip('\t'))
group_id_dict[count]=line[:-1] # don't keep trailing \n
# reverse the group_id_dict
partition = dict()
for g in group_id_dict.keys():
node_list_temp = group_id_dict[g]
for n in node_list_temp:
if int(n) in partition.keys():
partition[int(n)].append(g)
else:
partition[int(n)] = [g]
partition = pd.Series(partition)
return partition
def results_TCGA_cluster(data_path,code_path,results_path, algorithm='louvain',edge_thresh=0,gamma=1,cluster_size_min=5, cluster_size_max=2000, write_file_name='cluster_results.csv', print_flag=True):
'''
Function to process and cluster TCGA correlation files
Inputs:
- data_path: path to the correlation file, including file, example: '/home/ec2-user/data/LIHC/mirna_vs_rnaseq.cor'
- code_path: path to location of 'select.py' function, example: '/home/ec2-user/code/clustering_programs_5_2'
- results_path: path to storage of results, example: '/home/ec2-user/results'
- algorithm: name of clustering algorithm to use. Can be one of:
- 'oslom_undirected'
- 'infomap_undirected'
- 'louvain'
- 'label_propagation'
- 'hierarchical_infomap_undirected'
- 'modularity_optimization'
(see https://sites.google.com/site/andrealancichinetti/software for more details)
- edge_thresh: edge weight cutoff (default= 0)
- gamma: tuning parameter for weights (default = 1--> works with all algorithms)
- cluster_size_min: minimum cluster size to include (default = 5)
- cluster_size_max: maximum cluster size to include (default = 2000)
- write_file_name: path and name to store results (example: '/home/ec2-user/results/louvain_cluster_results.csv')
- print_flag: decide whether to print out progress (default = True)
'''
# import the data
print('importing the data...')
D_df, edge_list_total = import_TCGA_data(data_path)
# calculate louvain clusters
print('thresholding edges...')
elarge,nodesmall,Gtemp = find_edges_thresh(edge_list_total, edge_thresh = edge_thresh,gamma=gamma)
print('calculating optimal community partitions using modularity maximization...')
#partition = community.best_partition(Gtemp)
# check if Gtemp is bipartite
is_G_bipartite = nx.bipartite.is_bipartite(Gtemp)
results_folder = results_path + '/results_'+algorithm+'_temp'
code_select = code_path+'/select.py'
partition = run_lancichinetti_clustering(Gtemp,data_path,code_select,results_folder,algorithm=algorithm,num_c_reps=5)
# calculate the true value counts (flatten the list of lists first)
flat_part_values = [item for sublist in partition.values for item in sublist]
flat_part_VC = pd.Series(flat_part_values).value_counts()
# switch partition values to tuples, so value_counts() works
part_values = [tuple(x) for x in partition.values]
partition = pd.Series(part_values,list(partition.index))
partition_VC = partition.value_counts()
    # set low co-occurrence nodes to group -1
keylist = partition.keys()
allnodes = []
allnodes.extend(D_df['var1'])
allnodes.extend(D_df['var2'])
allnodes = list(np.unique(allnodes))
setdiff_nodes = np.setdiff1d(allnodes,keylist)
for s in range(len(setdiff_nodes)):
partition[setdiff_nodes[s]]=[-1]
# setup data for output- only save within community edges
partition = dict(partition)
numedges = len(D_df.var1)
numnodes = len(partition)
node1list, node2list, corrlist, pvallist, groupidlist = [],[],[],[],[]
for i in range(numedges):
# print out some progress if print_flag True
if print_flag:
if (i%100000)==0:
print('%.2f percent written' % (i/float(numedges)))
key1 = D_df.var1[i]
key2 = D_df.var2[i]
# check how many groups key1 and key2 belong to
num_groups_1 = len(partition[key1])
num_groups_2 = len(partition[key2])
groups_both = []
groups_both.extend(partition[key1])
groups_both.extend(partition[key2])
groups_both = list(np.unique(groups_both))
# fill in lists if node 1 and node 2 are in the same group
for g in groups_both:
if (g in partition[key1]) and (g in partition[key2]) and (g>-1) and (flat_part_VC[g]>=cluster_size_min) and (flat_part_VC[g]<=cluster_size_max):
node1list.append(key1)
node2list.append(key2)
corrlist.append(D_df['corr'][i])
pvallist.append(D_df['p'][i])
groupidlist.append(g)
# wrap results in a dataframe
D_with_groups = pd.DataFrame({'var1':node1list,'var2':node2list,'corr':corrlist,'p':pvallist,'group_id':groupidlist})
# trim the groups (connect periphery nodes to core nodes)
D_trimmed = cluster_connect.trim_cluster_df(D_with_groups,num_edges_to_keep=20000)
D_trimmed.index = range(len(D_trimmed))
# sort the groups
D_with_groups_sorted = sort_clusters(D_trimmed,partition,is_bipartite=is_G_bipartite,print_flag=print_flag)
# write results to file
D_with_groups_sorted.to_csv(write_file_name,sep='\t',index=False)
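# Illustrative end-to-end call, reusing the example paths from the docstring
# above (the output file name is an assumption):
#
#   results_TCGA_cluster(
#       data_path='/home/ec2-user/data/LIHC/mirna_vs_rnaseq.cor',
#       code_path='/home/ec2-user/code/clustering_programs_5_2',
#       results_path='/home/ec2-user/results',
#       algorithm='louvain',
#       write_file_name='/home/ec2-user/results/louvain_cluster_results.csv')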
def local_modularity(G,node_list,weighted_tf=False):
''' Calculate the local modularity of a group of nodes. Sum of all partition Lmods = total modularity'''
# is graph weighted?
if weighted_tf:
degree_G = G.degree(G.nodes(),weight='weight')
else:
degree_G = G.degree(G.nodes())
sub_G = G.subgraph(node_list)
    m2 = np.sum(degree_G.values()) # 2m: sum of (weighted) degrees over the full graph
L_mod = 0
for i in range(len(node_list)):
for j in range(len(node_list)):
nodei = node_list[i]
nodej = node_list[j]
# does the edge exist?
if sub_G.has_edge(nodei,nodej):
edge_data = sub_G.get_edge_data(nodei,nodej)
if weighted_tf:
weight = edge_data['weight']
else:
weight = 1
else:
weight = 0
L_mod = L_mod + weight - degree_G[nodei]*degree_G[nodej]/float(m2)
L_mod = L_mod/m2 # normalize it
return L_mod
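# Minimal sketch of the property stated in the docstring (the toy graph and
# partition are assumptions; nx and community are already imported above):
# summing local_modularity over the groups of a partition should reproduce
# the usual modularity of that partition.
#
#   G = nx.karate_club_graph()
#   part = community.best_partition(G)
#   Q = sum(local_modularity(G, [n for n in part if part[n] == g])
#           for g in set(part.values()))
#   # Q should be close to community.modularity(part, G)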
def sort_clusters(D_with_groups,partition,is_bipartite=False,print_flag=True,plot_flag=False):
# input D_with_groups and partition from results_TCGA_cluster
# is the network symmetric or bipartite? --> import this from Gtemp in 'results_TCGA_cluster'
# return sorted dataframe
# how many groups are there?
groups = D_with_groups['group_id'].unique()
num_groups = len(groups)
v1temp = D_with_groups['var1']
v2temp = D_with_groups['var2']
v1temp = np.unique(v1temp)
v2temp = np.unique(v2temp)
num_overlap = np.intersect1d(v1temp,v2temp)
# sort group_ids by corr, re-order dataframe
corr_sorted_total,p_sorted_total = [],[]
v1total,v2total = [],[]
group_total = []
group_count = 0
for focal_group in groups:
group_count += 1
if print_flag:
print('sorting group ' + str(group_count) + ' out of ' + str(num_groups))
c_idx = list(D_with_groups[D_with_groups['group_id']==focal_group].index)
vrow = D_with_groups['var1'][c_idx]
vrow = np.unique(vrow)
num_nodes_r = len(vrow)
vcol = D_with_groups['var2'][c_idx]
vcol = np.unique(vcol)
num_nodes_c = len(vcol)
vtot = []
vtot.extend(vrow)
vtot.extend(vcol)
v_unique = np.unique(vtot)
num_nodes_t = len(v_unique)
v_map_tot = dict(zip(v_unique,range(len(v_unique))))
v_map_tot_r = dict(zip(range(len(v_unique)),v_unique))
v_map_row = dict(zip(vrow,range(num_nodes_r)))
v_map_row_r = dict(zip(range(num_nodes_r),vrow))
v_map_col = dict(zip(vcol,range(num_nodes_c)))
v_map_col_r = dict(zip(range(num_nodes_c),vcol))
# make corr_mat and p_mat symmetric if there is overlap between vrow and vcol
if is_bipartite:
corr_mat = np.zeros((num_nodes_r,num_nodes_c))
p_mat = np.ones((num_nodes_r,num_nodes_c))
else:
corr_mat = np.zeros((num_nodes_t,num_nodes_t))
p_mat = np.ones((num_nodes_t, num_nodes_t))
for i in c_idx:
v1 = D_with_groups['var1'][i]
v2 = D_with_groups['var2'][i]
# make it symmetric if there is overlap between vrow and vcol
if is_bipartite:
corr_mat[v_map_row[v1],v_map_col[v2]] = D_with_groups['corr'][i]
p_mat[v_map_row[v1],v_map_col[v2]] = D_with_groups['p'][i]
else:
corr_mat[v_map_tot[v1],v_map_tot[v2]] = D_with_groups['corr'][i]
p_mat[v_map_tot[v1],v_map_tot[v2]] = D_with_groups['p'][i]
corr_mat[v_map_tot[v2],v_map_tot[v1]] = D_with_groups['corr'][i] # make it symmetric
p_mat[v_map_tot[v2],v_map_tot[v1]] = D_with_groups['p'][i] # make it symmetric
if (not is_bipartite) and len(v_map_tot)>1:
#DRmat = ssd.squareform(ssd.pdist(np.abs(corr_mat)))
DRmat = slow_dist_mat(np.abs(corr_mat)) # replaced dist mat calc because indices were wrong
row_Z = sch.linkage(DRmat)
row_idx = sch.leaves_list(row_Z)
elif is_bipartite and len(v_map_row)>1:
#DRmat = ssd.squareform(ssd.pdist(np.abs(corr_mat)))
DRmat = slow_dist_mat(np.abs(corr_mat))
row_Z = sch.linkage(DRmat)
row_idx = sch.leaves_list(row_Z)
else:
# don't sort if there is only one row
row_idx=0
if (not is_bipartite) and len(v_map_tot)>1:
#DCmat = ssd.squareform(ssd.pdist(np.abs(np.transpose(corr_mat))))
DCmat = slow_dist_mat(np.transpose(np.abs(corr_mat)))
col_Z = sch.linkage(DCmat)
col_idx = sch.leaves_list(col_Z)
elif is_bipartite and len(v_map_col)>1:
#DCmat = ssd.squareform(ssd.pdist(np.abs(np.transpose(corr_mat))))
DCmat = slow_dist_mat(np.transpose(np.abs(corr_mat)))
col_Z = sch.linkage(DCmat)
col_idx = sch.leaves_list(col_Z)
else:
# don't sort if there is only one column
col_idx = 0
corr_shape = np.shape(corr_mat)
print(corr_shape)
numrows = corr_shape[0]
numcols = corr_shape[1]
corr_mat_sorted = corr_mat
p_mat_sorted = p_mat
if (numrows>1) and (numcols>1):
# only need to sort if corr_mat has more than one row/col
corr_mat_sorted = corr_mat_sorted[row_idx,:]
corr_mat_sorted = corr_mat_sorted[:,col_idx]
p_mat_sorted = p_mat_sorted[row_idx,:]
p_mat_sorted = p_mat_sorted[:,col_idx]
# reshape sorted corr_mat, save to new df?
corr_mat_sorted_flat = np.ravel(corr_mat_sorted)
p_mat_sorted_flat = np.ravel(p_mat_sorted)
if plot_flag:
plt.matshow(corr_mat_sorted,cmap='bwr',vmin=-1,vmax=1)
# also save row/col gene ids
mgrid_test = np.mgrid[0:numrows,0:numcols]
mgrid_rows = mgrid_test[0]
mgrid_cols = mgrid_test[1]
row_flat = np.ravel(mgrid_rows)
col_flat = np.ravel(mgrid_cols)
# then translate to gene ids
v1list = []
v2list = []
# handle symmetry
if is_bipartite:
if numrows>1:
v1list = [v_map_row_r[row_idx[r]] for r in row_flat]
else:
v1list = [v_map_row_r[r] for r in row_flat]
if numcols>1:
v2list = [v_map_col_r[col_idx[c]] for c in col_flat]
else:
v2list = [v_map_col_r[c] for c in col_flat]
else:
v1list = [v_map_tot_r[row_idx[r]] for r in row_flat]
v2list = [v_map_tot_r[col_idx[c]] for c in col_flat]
# also save group ids
group_list = (np.ones((1,len(v1list)))*focal_group)
group_list = list(group_list[0])
corr_sorted_total.extend(corr_mat_sorted_flat)
p_sorted_total.extend(p_mat_sorted_flat)
v1total.extend(v1list)
v2total.extend(v2list)
group_total.extend(group_list)
D_with_groups_sorted = pd.DataFrame({'corr':corr_sorted_total,'p':p_sorted_total,
'var1':v1total,'var2':v2total,'group_id':group_total})
return D_with_groups_sorted
def slow_dist_mat(C):
'''
Helper function to calculate the distance matrix (using squareform and pdist resulted in re-ordering indices)
'''
dist = np.zeros((len(C),len(C)))
for i in range(len(C)-1):
p1 = C[i,:]
for j in range(i+1,len(C)):
p2 = C[j,:]
dist[i,j] = ssd.cdist([p1],[p2])[0][0]
dist[j,i] = dist[i,j]
return dist
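# Minimal sketch of what slow_dist_mat computes (illustrative; the random
# matrix is an assumption): pairwise Euclidean distances between the rows of
# C, with row order preserved.
#
#   C = np.random.rand(4, 3)
#   D = slow_dist_mat(C)
#   # np.allclose(D, ssd.cdist(C, C)) should be True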
def cal_mirna_enrichment(Gtemp, GO_ID_list, total_unique_gene, GO_Term_list, focal_node):
enrichment_mirna = dict()
# find neighbors of focal_node
if focal_node in Gtemp.nodes():
f_neighbors = Gtemp.neighbors(focal_node)
if len(f_neighbors)>20:
print(focal_node + ' has ' + str(len(f_neighbors)) + ' neighbors')
# annotate this list
enriched_list = HypergeomCalculator.calc_enrichment(f_neighbors, GO_ID_list, total_unique_gene, GO_Term_list)
GO_temp = dict()
for enriched_item in enriched_list:
if enriched_item['qvalue'] > 10:
GO_temp[enriched_item['go_id']] = enriched_item['qvalue']
if True:
print(enriched_item['name'] + ': q-value = ' + str(enriched_item['qvalue']))
# only create a key for focal node if it has some significant entries
if len(GO_temp) > 0:
enrichment_mirna[focal_node] = GO_temp
return enrichment_mirna
def save_ivanovska_clusters(data_path,edge_thresh=.5,gamma=1,qthresh=10, cluster_size_min=5,
print_flag=True,plot_flag=False,write_file_name='GO_clusters_temp.csv'):
'''
This is a function that implements the Ivanovska clustering method of annotating var2 terms which are highly associated
with var1 terms, annotating against the gene ontology, then clustering this matrix.
Saves an edge list which contains var1 terms with significant annotations, the terms they annotate to, their q-value,
and the group they belong to. The edge list has been sorted so that the top annotating terms/genes appear highest in
each cluster.
arguments:
- data_path: path to correlation edge list (example: data_path = '/Users/brin/Documents/TCGA_data/LIHC/mirna_vs_rnaseq.cor')
- edge_thresh: cutoff for how highly associated var2 genes must be to each var1 (default = .5)
- gamma: parameter to scale correlations (default = 1.. probably don't want to change this)
- qthresh: cutoff for significance of enriched GO terms (default = 10)
- cluster_size_min: minimum cluster size to save
- print_flag: print some diagnostics? (default = True)
- plot_flag: plot the total heatmap? (default = False)
- write_file_name: where should we write the final file? (default = 'GO_clusters_temp.csv')
returns: None
'''
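# Illustrative call (the path below is a placeholder, not a real file):
# save_ivanovska_clusters('/path/to/mirna_vs_rnaseq.cor', edge_thresh=.5, qthresh=10,
#                         cluster_size_min=5, write_file_name='GO_clusters.csv')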
#data_path = '/Users/brin/Documents/TCGA_data/LIHC/mirna_vs_rnaseq.cor'
#edge_thresh = .5
#gamma = 1
#qthresh = 10 # minimum enrichment significance to record
#print_flag = True
#plot_flag = False
#write_file_name = 'GO_clusters_temp.csv'
#cluster_size_min = 5
OV_df, edge_list = import_TCGA_data(data_path) # import the data
elarge, nodesmall, Gtemp = find_edges_thresh(edge_list,edge_thresh=edge_thresh,gamma=gamma) # build the graph
# import GO annotation tools (this takes a little time) NOTE: CHANGE THESE PATHS
go_gene_file = '/shared/workspace/SearchEngineProject/GO/GO2all_locus.txt'
gene_info_file = '/shared/workspace/SearchEngineProject/GO/Homo_sapiens.gene_info'
go_term_file = '/shared/workspace/SearchEngineProject/GO/go.obo'
GO_ID_list, total_unique_gene, GO_Term_list = GOLocusParser.parse(go_gene_file, gene_info_file, go_term_file)
# write a function to annotate genes which correlate highly with any mirna (e.g. neighbors in the graph)
#nodes_A,nodes_B = nx.bipartite.sets(Gtemp)
nodes_A = list(OV_df['var1'].unique())
nodes_B = list(OV_df['var2'].unique())
test_nodes = nodes_A[-5:]
func = partial(cal_mirna_enrichment, Gtemp, GO_ID_list, total_unique_gene, GO_Term_list)
pool = Pool(processes=2)
enrichment_list = pool.map(func, test_nodes)
pool.close()
pool.join()
enrichment_mirna = {}
for result in enrichment_list:
for key in result:
enrichment_mirna.update({key:result.get(key)})
if len(enrichment_mirna)>2:
GO_unique = [enrichment_mirna[n].keys() for n in enrichment_mirna.keys()]
# flatten the list
GO_unique = [item for sublist in GO_unique for item in sublist]
GO_unique = np.unique(GO_unique)
print(len(GO_unique))
# make a dictionary to map from GO_unique to index, and mirna to index
GO_map = dict(zip(GO_unique,range(len(GO_unique))))
GO_map_r = dict(zip(range(len(GO_unique)),GO_unique))
mirna_map = dict(zip(enrichment_mirna.keys(),range(len(enrichment_mirna.keys()))))
mirna_map_r = dict(zip(range(len(enrichment_mirna.keys())),enrichment_mirna.keys()))
# now make the correlation matrix: GO_mirna
GO_mirna = np.zeros((len(GO_map),len(mirna_map)))
# loop over mirnas
for n in enrichment_mirna.keys():
mirna_idx = mirna_map[n]
# loop over GO terms in each mirna
for g in enrichment_mirna[n].keys():
GO_idx = GO_map[g]
qtemp = enrichment_mirna[n][g]
# fill in the matrix
GO_mirna[GO_idx,mirna_idx] = qtemp
# now try clustering using louvain- what do we get?
go_mirna_for_graph = dict()
qvec = []
for n in enrichment_mirna.keys():
# loop over GO terms in each mirna
dict_temp = dict()
for g in enrichment_mirna[n].keys():
qtemp = enrichment_mirna[n][g]
qvec.append(qtemp)
#qtemp = np.exp(-qtemp**2)
#qtemp = round(qtemp*5)
qtemp = qtemp**gamma
# fill in the dict
dict_temp[g]={'weight':qtemp}
go_mirna_for_graph[n] = dict_temp
G_go_mirna = nx.from_dict_of_dicts(go_mirna_for_graph)
#partition = community.best_partition(G_go_mirna)
dendo = community.generate_dendrogram(G_go_mirna)
partition = community.partition_at_level(dendo, 0)
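# level 0 of the Louvain dendrogram is the finest partition; the resulting dict maps each
# node (miRNA or GO term) to a community id, which is used below to group rows and columns.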
partition = pd.Series(partition)
partition_sort = partition.sort(axis=0,inplace=False)
idx_sort = list(partition_sort.index)
idx_mirna = np.array([m for m in idx_sort if (m in mirna_map.keys())]) # np.intersect1d(idx_sort,mirna_map.keys())
grp_mirna = np.array([partition_sort[m] for m in idx_sort if (m in mirna_map.keys())])
idx_GO = np.array([g for g in idx_sort if (g in GO_map.keys())])
grp_GO = np.array([partition[g] for g in idx_sort if (g in GO_map.keys())])
group_ids = list(np.unique(partition_sort))
col_idx = []
row_idx = []
corr_sorted_total, gene_list_total,GO_term_list_total,group_total = [],[],[],[]
for g in group_ids:
# sort individual groups by mean GO value in each row/column
idx_mirna_focal = idx_mirna[grp_mirna==g]
col_temp = np.array([mirna_map[i] for i in idx_mirna_focal])
mean_mirna_focal = np.mean(GO_mirna[:,col_temp],0)
mean_sort = np.argsort(mean_mirna_focal)
mean_sort = mean_sort[::-1] # sort descending
col_temp = col_temp[mean_sort]
# append to col_idx
col_idx.extend(col_temp)
idx_GO_focal = idx_GO[grp_GO==g]
row_temp = np.array([GO_map[i] for i in idx_GO_focal])
print "break point!!!!"
print idx_mirna_focal
if len(row_temp)>0:
# check that row_temp isn't empty
mean_GO_focal = np.mean(GO_mirna[row_temp,:],1)
mean_sort = np.argsort(mean_GO_focal)
mean_sort = mean_sort[::-1] # sort descending
row_temp = row_temp[mean_sort]
# append to col_idx
row_idx.extend(row_temp)
# save out flattened sections of correlation matrix as clusters
# only save if there are more than cluster_size_min items in cluster
cluster_size = np.sum(partition==g)
if cluster_size>cluster_size_min:
corr_mat_focal = GO_mirna
corr_mat_focal = corr_mat_focal[row_temp,:]
corr_mat_focal = corr_mat_focal[:,col_temp]
corr_mat_focal_flat = np.ravel(corr_mat_focal)
corr_shape = np.shape(corr_mat_focal)
print(corr_shape)
numrows = corr_shape[0]
numcols = corr_shape[1]
mgrid_test = np.mgrid[0:numrows,0:numcols]
mgrid_rows = mgrid_test[0]
mgrid_cols = mgrid_test[1]
row_flat = np.ravel(mgrid_rows)
col_flat = np.ravel(mgrid_cols)
# then translate to gene ids/ GO term names
gene_list = []
gene_list = [mirna_map_r[col_temp[i]] for i in col_flat]
GO_term_list = [GO_map_r[row_temp[i]] for i in row_flat]
# also save the group list
group_list = (np.ones((1,len(gene_list)))*g)
group_list = list(group_list[0])
corr_sorted_total.extend(corr_mat_focal_flat)
gene_list_total.extend(gene_list)
GO_term_list_total.extend(GO_term_list)
group_total.extend(group_list)
GO_name_list_total=[GO_Term_list[x][0] for x in GO_term_list_total]
D_with_groups_sorted = pd.DataFrame({'qvalue':corr_sorted_total,'gene_name':gene_list_total,
'GO_term':GO_term_list_total,'GO_name':GO_name_list_total,
'group_id':group_total})
else:
# save out dummy dataframe if there are not enough enriched terms
D_with_groups_sorted = pd.DataFrame({'qvalue':np.nan,'gene_name':np.nan,
'GO_term':np.nan, 'GO_name':np.nan,
'group_id':np.nan},index=[0])
# write results to file
D_with_groups_sorted.to_csv(write_file_name,sep='\t',index=False)
go_mirna_L = GO_mirna
go_mirna_L = go_mirna_L[row_idx,:]
go_mirna_L = go_mirna_L[:,col_idx]
if plot_flag:
plt.figure(figsize=(20,50))
plt.matshow(go_mirna_L,fignum=False,cmap='jet',aspect='auto',vmin=0,vmax=30)
xtick_labels = [mirna_map_r[i] for i in col_idx]
ytick_labels = [GO_map_r[i] for i in row_idx]
plt.xticks(range(len(xtick_labels)),xtick_labels,rotation=90)
plt.yticks(range(len(ytick_labels)),ytick_labels,fontsize=6)
plt.grid('off')
#plt.savefig('/Users/brin/Google_Drive/UCSD/update_16_01/LIHC_go_mirna_louvain.png',dpi=150)
| mit | -3,302,082,777,373,428,000 | 38.912371 | 211 | 0.599348 | false |
Rfam/rfam-production | scripts/support/mirnas/report_to_mirna_input.py | 1 | 3367 | import argparse
import json
import os
from datetime import date
# -------------------------------------------------------------------------------
def extract_new_mirnas_from_report(report_tsv, type='new'):
"""
"""
new_mirnas = {}
fp = open(report_tsv, 'r')
count = 0
for line in fp:
line = line.strip().split('\t')
# check if candidate mirna is a new family
if line[6].lower() == "new family":
# skip families requiring review
if line[1] != '?' and line[1] != '' and line[2] != "1_SEED":
if line[0] not in new_mirnas:
print line
new_mirnas[line[0]] = line[1]
elif line[6].lower() == 'done':
count += 1
fp.close()
return new_mirnas
# -------------------------------------------------------------------------------
def extract_rfam_family_accessions(report_file):
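# Builds {mirbase_id: {'rfam_acc': [...], 'threshold': float, 'overlap': float}} from report
# rows flagged "UPDATE SEED" whose overlap is at most 100.0 (inferred from the body below).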
fp = open(report_file, 'r')
accession_map = {}
for line in fp:
line = line.strip().split('\t')
overlap = float(line[4])
if overlap <= 100.0:
# converts to upper to ensure labels match the constant
if line[6].strip().upper() == "UPDATE SEED":
rfam_acc = line[3].strip()
rfam_acc_list = []
# split comma-separated accession strings into a list of accessions
if rfam_acc.find(',') != -1:
rfam_acc_list = rfam_acc.split(',')
else:
rfam_acc_list = [rfam_acc]
threshold = 0.0
if line[1] != '':
threshold = float(line[1])
# trim any whitespace characters
mirbase_id = line[0].strip()
accession_map[mirbase_id] = {"rfam_acc": rfam_acc_list,
"threshold": threshold,
"overlap": float(line[4])}
fp.close()
return accession_map
# -------------------------------------------------------------------------------
def parse_arguments():
"""
"""
parser = argparse.ArgumentParser()
parser.add_argument("--report", help="miRNA report in .tsv format", action="store")
parser.add_argument("--dest-dir", help="Desctination directory", action="store", default=os.getcwd())
parser.add_argument("--old-rfam", help="Fetches old Rfam miRNA accessions to be updated",
action="store_true", default=False)
parser.add_argument("--create-dump", help="Generates a JSON (.json) dump in destination directory",
action="store_true", default=False)
return parser
# -------------------------------------------------------------------------------
if __name__ == '__main__':
parser = parse_arguments()
args = parser.parse_args()
accessions = None
if not args.old_rfam:
new_mirnas = extract_new_mirnas_from_report(args.report, type='new')
accessions = new_mirnas
else:
accessions = extract_rfam_family_accessions(args.report)
if args.create_dump:
filename = "new_mirnas_"
if args.old_rfam:
filename = "mirna_families_to_update_"
fp_out = open(os.path.join(args.dest_dir, filename + str(date.today()) + ".json"), 'w')
json.dump(accessions, fp_out)
fp_out.close()
| apache-2.0 | -6,873,067,566,488,718,000 | 28.278261 | 105 | 0.471339 | false |
jucacrispim/toxicbuild | toxicbuild/master/slave.py | 1 | 21372 | # -*- coding: utf-8 -*-
# Copyright 2016-2020 Juca Crispim <[email protected]>
# This file is part of toxicbuild.
# toxicbuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# toxicbuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with toxicbuild. If not, see <http://www.gnu.org/licenses/>.
import asyncio
from collections import defaultdict
import time
import traceback
from mongomotor.fields import (StringField, IntField, BooleanField,
DictField, ListField)
from toxicbuild.core.exceptions import ToxicClientException, BadJsonData
from toxicbuild.core.utils import (string2datetime, LoggerMixin, now,
localtime2utc)
from toxicbuild.common.exchanges import notifications
from toxicbuild.master.aws import EC2Instance
from toxicbuild.master.build import BuildStep, Builder
from toxicbuild.master.client import get_build_client
from toxicbuild.master.document import OwnedDocument
from toxicbuild.master.signals import (build_started, build_finished,
step_started, step_finished,
step_output_arrived, build_preparing)
class Slave(OwnedDocument, LoggerMixin):
""" Slaves are the entities that actualy do the work
of execute steps. The comunication to slaves is through
the network (using :class:`toxicbuild.master.client.BuildClient`).
The steps are actually decided by the slave.
"""
INSTANCE_TYPES = ('ec2',)
INSTANCE_CLS = {'ec2': EC2Instance}
DYNAMIC_HOST = '<DYNAMIC-HOST>'
host = StringField(required=True)
"""Slave's host."""
port = IntField(required=True)
"""Port for the slave to listen."""
token = StringField(required=True)
"""Token for authentication."""
is_alive = BooleanField(default=False)
"""Indicates if the slave is up and running."""
use_ssl = BooleanField(default=True)
"""Indicates if the build server in uses ssl connection."""
validate_cert = BooleanField(default=True)
"""Indicates if the certificate from the build server should be validated.
"""
on_demand = BooleanField(default=False)
"""If the slave is on-demand it will be started when needed and
will be stopped when all the builds for this slave are completed.
"""
instance_type = StringField(choices=INSTANCE_TYPES)
"""The type of instance used. Currently only 'ec2' is supported.
"""
instance_confs = DictField()
"""Configuration paramenters for the on-demand instance.
"""
parallel_builds = IntField(default=0)
"""Max number of builds in parallel that this slave exeutes.
If no parallel_builds there's no limit.
"""
queue_count = IntField(default=0)
"""How many builds are waiting to run in this repository."""
enqueued_builds = ListField(StringField())
"""Uuids of builds enqueued to run in this slave."""
running_count = IntField(default=0)
"""How many builds are running in this slave."""
running_repos = ListField(StringField())
"""The ids of the repositories that have builds running in this slave.
"""
meta = {
'ordering': ['name']
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# So, the thing here is that we may have a race condition
# with the last step output and the build finished messages.
# In fact, all the build management/build server communication already
# is at its limits. A new implementation is needed.
self._step_finished = defaultdict(lambda: False)
self._step_output_cache = defaultdict(list)
self._step_output_cache_time = defaultdict(float)
self._step_output_cache_limit = 1 # seconds
self._step_output_is_updating = defaultdict(lambda: False)
async def save(self, *args, **kwargs):
if self.on_demand and not self.host:
self.host = self.DYNAMIC_HOST
r = await super().save(*args, **kwargs)
return r
@classmethod
async def create(cls, **kwargs):
"""Creates a new slave"""
slave = cls(**kwargs)
await slave.save()
return slave
def to_dict(self, id_as_str=False):
"""Returns a dict representation of the object."""
host = self.host if self.host != self.DYNAMIC_HOST else ''
my_dict = {'name': self.name, 'host': host,
'port': self.port, 'token': self.token,
'full_name': self.full_name,
'is_alive': self.is_alive, 'id': self.id,
'on_demand': self.on_demand,
'use_ssl': self.use_ssl,
'validate_cert': self.validate_cert,
'instance_type': self.instance_type,
'instance_confs': self.instance_confs}
if id_as_str:
my_dict['id'] = str(self.id)
return my_dict
@classmethod
async def get(cls, **kwargs):
"""Returns a slave instance."""
slave = await cls.objects.get(**kwargs)
return slave
@property
def instance(self):
"""Returns an on-demand instance wrapper.
"""
cls = self.INSTANCE_CLS[self.instance_type]
return cls(**self.instance_confs)
async def enqueue_build(self, build):
"""Marks a build as enqueued in this slave. It does not enqueue
two times the same build, if the build is already enqueued simply
skip it returning False
"""
buuid = str(build.uuid)
if buuid in self.enqueued_builds:
return False
self.enqueued_builds.append(buuid)
await self.update(
inc__queue_count=1,
enqueued_builds=self.enqueued_builds)
return True
async def dequeue_build(self, build):
"""Unmark a build as enqueued. If the build is not enqueued returns
False.
"""
try:
i = self.enqueued_builds.index(str(build.uuid))
self.enqueued_builds.pop(i)
except ValueError:
return False
await self.update(dec__queue_count=1,
enqueued_builds=self.enqueued_builds)
return True
async def add_running_repo(self, repo_id):
"""Increments the number of running builds in this slave and
adds the repository id to the running repos list. Also decrements
the queue count.
:param repo_id: An id of a repository.
"""
self.running_repos.append(str(repo_id))
self.running_count += 1
self.queue_count -= 1
await self.update(dec__queue_count=1, inc__running_count=1,
set__running_repos=self.running_repos)
async def rm_running_repo(self, repo_id):
"""Decrements the number of running builds in this slave and
removes the repository id from the running repos list
:param repo_id: An id of a repository.
"""
self.running_repos.remove(str(repo_id))
self.running_count -= 1
await self.update(
dec__running_count=1, set__running_repos=self.running_repos)
async def start_instance(self):
"""Starts an on-demand instance if needed."""
if not self.on_demand:
return False
is_running = await self.instance.is_running()
if not is_running:
self.log('Starting on-demand instance for {}'.format(self.id),
level='debug')
await self.instance.start()
ip = await self.instance.get_ip()
if ip and self.host == self.DYNAMIC_HOST:
self.host = ip
await self.wait_service_start()
self.log('Instance for {} started with ip {}'.format(self.id, ip),
level='debug')
return ip
async def stop_instance(self):
"""Stops an on-demand instance"""
if not self.on_demand:
return False
if self.queue_count or self.running_count:
self.log('Instance still building, not stopping it.',
level='debug')
return False
self.log('Stopping on-demand instance for {}'.format(self.id),
level='debug')
is_running = await self.instance.is_running()
if not is_running:
self.log('Instance for {} already stopped. Leaving.'.format(
self.id), level='debug')
return False
await self.instance.stop()
self.log('Instance for {} stopped'.format(self.id), level='debug')
return True
async def get_client(self):
""" Returns a :class:`~toxicbuild.master.client.BuildClient` instance
already connected to the server.
"""
connected_client = await get_build_client(
self, self.host, self.port, use_ssl=self.use_ssl,
validate_cert=self.validate_cert)
return connected_client
async def healthcheck(self):
""" Check if the build server is up and running
"""
with (await self.get_client()) as client:
alive = await client.healthcheck()
return alive
async def wait_service_start(self, timeout=10):
"""Waits for the toxicslave service start in the on-demand
instance.
"""
self.log('waiting toxicslave service start for {}'.format(self.id),
level='debug')
i = 0
while i < timeout:
try:
await self.healthcheck()
return True
except ToxicClientException:
raise
except Exception as e:
self.log('Service down {}'.format(i), level='debug')
self.log(str(e), level='debug')
i += 1
await asyncio.sleep(1)
raise TimeoutError
async def list_builders(self, revision):
""" List builder available in for a given revision
:param revision: An instance of
:class:`toxicbuild.master.repository.RepositoryRevision`
"""
repository = await revision.repository
repo_url = repository.url
vcs_type = repository.vcs_type
branch = revision.branch
named_tree = revision.commit
with (await self.get_client()) as client:
builders = await client.list_builders(repo_url, vcs_type,
branch, named_tree)
builder_instances = []
for bname in builders:
builder = await Builder.get_or_create(repository=repository,
name=bname)
builder_instances.append(builder)
return list(builder_instances)
async def _finish_build_start_exception(self, build, repo, exc_out):
build.status = 'exception'
build.steps = [BuildStep(repository=repo, name='Exception',
command='exception',
output=exc_out, status='exception')]
await build.update()
async def build(self, build, **envvars):
""" Connects to a build server and requests a build on that server
:param build: An instance of :class:`toxicbuild.master.build.Build`
:param envvars: Environment variables to use in the builds.
"""
repo = await build.repository
await self.add_running_repo(repo.id)
await self.dequeue_build(build)
try:
build.status = build.PREPARING
await build.update()
repo = await build.repository
build_preparing.send(str(repo.id), build=build)
try:
await self.start_instance()
except Exception as e:
await self._finish_build_start_exception(build, repo, str(e))
return False
with (await self.get_client()) as client:
try:
build_info = await client.build(
build,
envvars=envvars,
process_coro=self._process_info)
except (ToxicClientException, BadJsonData):
output = traceback.format_exc()
build.status = 'exception'
build.started = build.started or localtime2utc(now())
build.finished = build.finished or localtime2utc(now())
exception_step = BuildStep(repository=repo, output=output,
started=localtime2utc(now()),
finished=localtime2utc(now()),
status='exception',
command='', name='exception')
build.steps.append(exception_step)
await build.update()
build_info = build.to_dict()
finally:
await self.rm_running_repo(repo.id)
return build_info
async def _process_info(self, build, repo, info):
""" Method used to process information sent by
the build server about an in progress build.
:param build: The build that is being executed
:param repo: The repository that owns the build.
:param info: A dictionary. The information sent by the
slave that is executing the build.
"""
# if we need one more conditional here is better to use
# a map...
if info['info_type'] == 'build_info':
await self._process_build_info(build, repo, info)
elif info['info_type'] == 'step_info':
await self._process_step_info(build, repo, info)
else:
await self._process_step_output_info(build, repo, info)
async def _process_build_info(self, build, repo, build_info):
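# Updates the build's status and timestamps from the slave's build_info payload, then
# emits the build_started/build_finished signals and the matching notifications.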
build.status = build_info['status']
build.started = string2datetime(build_info['started'])
finished = build_info['finished']
if finished:
build.finished = string2datetime(finished)
build.total_time = (build.finished - build.started).seconds
await build.update()
if not build.finished:
msg = 'build started at {}'.format(build_info['started'])
self.log(msg)
build_started.send(str(repo.id), build=build)
await build.notify('build-started')
else:
msg = 'build finished at {} with status {}'.format(
build_info['finished'], build.status)
self.log(msg)
build_finished.send(str(repo.id), build=build)
step = build.steps[-1]
status = build_info['steps'][-1]['status']
finished = build_info['steps'][-1]['finished']
await self._fix_last_step_status(build, step, status, finished)
await build.notify('build-finished')
async def _process_step_info(self, build, repo, step_info):
cmd = step_info['cmd']
name = step_info['name']
status = step_info['status']
output = step_info['output']
started = step_info['started']
finished = step_info['finished']
index = step_info['index']
uuid = step_info['uuid']
if finished:
self._step_finished[uuid] = True
msg = 'step {} {} finished at {} with status {}'.format(
cmd, uuid, finished, status)
self.log(msg, level='debug')
requested_step = await self._get_step(build, uuid)
requested_step.status = status
if requested_step.status == 'exception':
requested_step.output = output if not requested_step.output \
else requested_step.output + output
else:
requested_step.output = output
requested_step.finished = string2datetime(finished)
requested_step.total_time = step_info['total_time']
await build.update()
step_finished.send(str(repo.id), build=build, step=requested_step)
msg = requested_step.to_dict()
msg.update({'repository_id': str(repo.id),
'event_type': 'step-finished'})
await notifications.publish(msg)
else:
requested_step = BuildStep(repository=repo, name=name, command=cmd,
status=status, output=output,
started=string2datetime(started),
index=index, uuid=uuid)
build.steps.append(requested_step)
await build.update()
msg = 'step {} started at {}'.format(requested_step.command,
started)
self.log(msg, level='debug')
step_started.send(str(repo.id), build=build, step=requested_step)
msg = requested_step.to_dict()
msg.update({'repository_id': str(repo.id),
'event_type': 'step-started'})
await notifications.publish(msg)
if step_info.get('last_step_status'):
last_step = build.steps[-2]
status = step_info.get('last_step_status')
finished = step_info.get('last_step_finished')
await self._fix_last_step_status(build, last_step,
status, finished)
async def _fix_last_step_status(self, build, step, status, finished):
# this fixes the bug with the status of the step that
# in someway was getting lost here in the slave.
step.status = status
step.finished = string2datetime(finished)
await build.update()
async def _update_build_step_info(self, build, step_info):
# we need this cache here to avoid excessive memory consumption
# if we try to update the step output every time a line arrives.
output = step_info['output']
uuid = step_info['uuid']
self._step_output_cache[uuid].append(output)
now = time.time()
if not self._step_output_cache_time[uuid]:
self._step_output_cache_time[
uuid] = now + self._step_output_cache_limit
is_updating = self._step_output_is_updating[uuid]
if self._step_output_cache_time[uuid] >= now or is_updating:
return False
self._step_output_is_updating[uuid] = True
step = await self._get_step(build, uuid, wait=True)
# the thing here is that while we are waiting for the step,
# the step may have finished, so we don't do anything in this case.
if self._step_finished[uuid]:
self.log('Step {} already finished. Leaving...'.format(uuid),
level='debug')
del self._step_output_cache[uuid]
return False
output = [step.output or ''] + self._step_output_cache[uuid]
step.output = ''.join(output)
del self._step_output_is_updating[uuid]
del self._step_output_cache[uuid]
del self._step_output_cache_time[uuid]
await build.update()
return True
async def _process_step_output_info(self, build, repo, info):
uuid = info['uuid']
msg = 'step_output_arrived for {}'.format(uuid)
self.log(msg, level='debug')
info['repository'] = {'id': str(repo.id)}
info['build'] = {'uuid': str(build.uuid),
'repository': {'id': str(repo.id)}}
info['output'] = info['output'] + '\n'
step_output_arrived.send(str(repo.id), step_info=info)
await self._update_build_step_info(build, info)
async def _get_step(self, build, step_uuid, wait=False):
"""Returns a step from ``build``. Returns None if the requested
step is not present in the build.
:param build: A :class:`toxicbuild.master.build.Build` instance.
:param step_uuid: The uuid of the requested step.
"""
# this is ridiculous, but the idea of waiting for the step is
# that sometimes an info message - i.e. step_output_info - may arrive here
# before the step started info, so we need to wait a little.
build_inst = build
async def _get():
build = await type(build_inst).get(build_inst.uuid)
build_steps = build.steps
for i, step in enumerate(build_steps):
if str(step.uuid) == str(step_uuid):
build_inst.steps[i] = step
return step
step = await _get()
limit = 20
n = 0
while not step and wait:
await asyncio.sleep(0.001)
step = await _get()
n += 1
if n >= limit:
wait = False
return step
| agpl-3.0 | 5,881,850,398,815,286,000 | 36.494737 | 79 | 0.57828 | false |
ekwan/PyQuiver | src/constants.py | 1 | 5346 | # This file holds physical constants and reads atomic weights.
import sys
import re
import os
import inspect
###############
# Physical Constants
PHYSICAL_CONSTANTS = {
'h' : 6.626070E-34, # Planck's constants in J * s
'c' : 2.997925E+10, # speed of light in units of cm/s
'Eh' : 4.359745E-18, # energy of a hartree in units of J = kg m^2/s^2
'a0' : 5.291772E-11, # bohr radius in m
'atb': 5.291772E-01, # angstroms per bohr
'amu': 1.660468E-27, # atomic mass unit in units kg
'kB' : 1.380649E-23 # Boltzmann's constant in J/K
}
#CM/2.998E10/,EM/1.440E13/,HBC/1.4387/
###############
# Atomic Weight Information
class Element(object):
def __init__(self, full_name, atomic_number, symbol, default_mass):
# the name of this element, like "hydrogen"
full_name = str(full_name)
self.full_name = full_name
if re.match("[^a-z]", full_name):
print("Unexpected non-lowercase character in element name: %s" % full_name)
print("Quitting.")
sys.exit(1)
# the symbol of this element, like "H"
symbol = str(symbol)
self.symbol = symbol
if re.match("[^a-zA-Z]", symbol):
print("Unexpected non-letter character in element symbol: %s" % symbol)
print("Quitting.")
sys.exit(1)
if len(symbol) < 1 or len(symbol) > 2:
print("Unexpected length of element symbol (must be 1 or 2): %s" % symbol)
print("Quitting.")
sys.exit(1)
# the atomic number of this element, like 1
atomic_number = int(atomic_number)
self.atomic_number = atomic_number
if atomic_number < 1 or atomic_number > 200:
print("Unexpected atomic number: %d" % atomic_number)
print("Quitting.")
sys.exit(1)
# the average weight for this element, like 1.00783
default_mass = float(default_mass)
self.default_mass = default_mass
if default_mass < 0.0 or default_mass > 500.0:
print("Unexpected default mass: %d" % default_mass)
print("Quitting.")
sys.exit(1)
# pairs of tuples strings (like "2H") to masses (like 2.0141)
self.replacements = []
def __str__(self):
string = "%s (%s, Z=%d, default mass = %.4f" % (self.full_name.capitalize(), self.symbol, self.atomic_number, self.default_mass)
if len(self.replacements) == 0:
string += ", no isotopic replacements possible)\n"
else:
string += ")\n"
for s,m in self.replacements:
string += " %2s : %.4f\n" % (s,m)
return string[:-1]
def add_replacement(self, symbol, mass):
symbol = str(symbol)
if re.match("[^a-zA-Z0-9]", symbol):
print("Unexpected non-letter character in isotopic replacement symbol: %s" % symbol)
print("Quitting.")
sys.exit(1)
if len(symbol) < 1 or len(symbol) > 4:
print("Unexpected length of element symbol in replacement (must be 1-4 inclusive, found %d): %s" % (len(symbol), symbol))
print("Quitting.")
sys.exit(1)
for s,m in self.replacements:
if s == symbol:
print("Must use a unique symbol for every isotopic replacement: %s" % s)
sys.exit(1)
mass = float(mass)
if mass < 0.0 or mass > 500.0:
print("Unexpected isotopic replacement mass: %f" % mass)
sys.exit(1)
self.replacements.append((symbol,mass))
# read in atomic weight data
elements = []
root = os.path.split(os.path.abspath(__file__))[0]
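# Each non-comment line of weights.dat is expected to look like (inferred from the parsing
# below): full_name,atomic_number,symbol,default_mass[,iso_symbol,iso_mass]...
# with isotopic replacements supplied as trailing symbol/mass pairs.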
for line in open(root + "/weights.dat", "r"):
# ignore comments and blank lines
line = line.strip()
if len(line) == 0 or line[0] == "#":
continue
line = line.split("#",1)[0]
# parse
fields = line.split(",") #line.encode("ascii","ignore").split(",")
if len(fields) < 4:
print("Error: not enough data on this line of weights.dat:")
print(line)
print("\nQuitting.")
sys.exit(1)
element = Element(*fields[0:4])
if (len(fields)-4) % 2 != 0:
print("Unexpected number of isotopic replacement fields on this line of weights.dat.")
print("The number of fields after the first four must be a multiple of 2 (found %d)." % (len(fields)-4))
print(line)
print("\nQuitting.")
sys.exit(1)
if (len(fields) > 4):
for i in range(4, len(fields), 2):
element.add_replacement(fields[i], fields[i+1])
elements.append(element)
#print element
print("Read atomic weight data for %d elements." % len(elements))
# map from atomic number to default masses
DEFAULT_MASSES = { e.atomic_number : e.default_mass for e in elements }
# map from valid isotopic replacements to masses
REPLACEMENTS = {}
for e in elements:
for replacement,mass in e.replacements:
REPLACEMENTS[replacement] = mass
# map from isotopic replacements to atomic numbers
REPLACEMENTS_Z = {}
for e in elements:
for replacement,mass in e.replacements:
REPLACEMENTS_Z[replacement]=e.atomic_number
# threshold to separate linear molecules from non-linear molecules
LINEARITY_THRESHOLD = 1e-06
DROP_NUM_LINEAR = 5
# DROP_NUM_NONLINEAR = 6
| apache-2.0 | -5,842,683,066,656,835,000 | 35.121622 | 136 | 0.589787 | false |
kvidoo/MMexUpdater | mmupdater/MMexCategoryUpdater.py | 1 | 1514 | '''
Created on Aug 30, 2013
@author: novpa01
'''
import logging
import sys
import importlib
from mmupdater.Settings import Settings
from mmupdater.UserError import UserError
from mmupdater.MMexDb import MMexDb
from mmupdater.CategoryAssigner import CategoryAssigner
# Parse settings file
settings = Settings('settings.ini')
if __name__ == '__main__':
try:
# initialize log level
logging.basicConfig(level=settings.loglevel)
# initialize the component to talk to the MMex database
db = MMexDb(settings)
# initialize category assignments
cat_assigner = CategoryAssigner(settings, db)
# get transactions with no categories
transactions = db.get_transactions_with_no_categories()
print("Found " + str(len(transactions)) + " transactions with no category assigned")
# fill-in categories where we can
cat_assigner.assign(transactions)
# get just those transactions that have some category assigned
assigned_transactions = [t for t in transactions if 'CATEGID' in t]
print("Categories found for " + str(len(assigned_transactions)) + " transactions")
# save them to the database
db.update_transactions(assigned_transactions, cat_assigner)
# successful exit
exit(0)
except UserError as e:
sys.stderr.write("ERROR: " + str(e) + '\n')
# non-zero to report error
exit(1)
| mit | 5,669,176,103,942,437,000 | 28.686275 | 92 | 0.645971 | false |
arthursoprano/flow-pattern-map | buttons.py | 1 | 2571 | from ipywidgets import widgets, Layout
continuous_update = True
layout = Layout(width='50%', justify_content ='space-between', align_content='space-around')
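# Density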
ρ_L_button = widgets.FloatSlider(
min=800.0,
max=1200.0,
value=1000.0,
step=0.5,
continuous_update=continuous_update,
orientation='horizontal',
description='Liquid Density [kg/m3]',
layout=layout
)
ρ_G_button = widgets.FloatSlider(
min=0.5,
max=300.0,
value=2.0,
step=0.5,
continuous_update=continuous_update,
orientation='horizontal',
description='Gas Density [kg/m3]',
layout=layout
)
# Viscosity
μ_L_button = widgets.FloatSlider(
min=1e-3,
max=1e-2,
value=1e-3,
step=1e-5,
continuous_update=continuous_update,
orientation='horizontal',
description='Liquid Viscosity [Pa.s]',
readout_format='.2e',
layout=layout
)
μ_G_button = widgets.FloatSlider(
min=1e-5,
max=1e-3,
value=1e-5,
step=1e-6,
continuous_update=continuous_update,
orientation='horizontal',
description='Gas Viscosity [Pa.s]',
readout_format='.2e',
layout=layout
)
# Surface Tension
σ_button = widgets.FloatSlider(
min=0.01,
max=0.10,
value=0.07,
step=0.001,
continuous_update=continuous_update,
orientation='horizontal',
description='Surface Tension [N/m]',
layout=layout
)
# Pipe Diameter
D_button = widgets.FloatSlider(
min=0.001,
max=0.60,
value=0.1,
step=0.001,
continuous_update=continuous_update,
orientation='horizontal',
description='Pipe Diameter [m]',
readout_format='.3f',
layout=layout
)
# Pipe Inclination
θ_button = widgets.FloatSlider(
min=0.0,
max=90.0,
value=0.0,
step=0.01,
continuous_update=continuous_update,
orientation='horizontal',
description='Pipe Inclination [deg]',
layout=layout
)
# Pipe Roughness
k_s_button = widgets.FloatSlider(
min=0.0,
max=1e-3,
value=1e-5,
step=1e-6,
continuous_update=continuous_update,
description='Pipe Roughness [m]',
readout_format='.2e',
layout=layout
)
interface_button = widgets.Dropdown(
options={'Smooth': 1, 'Wavy': 2},
value=1,
description='Interface:',
layout=layout
)
plot_mesh_button = widgets.RadioButtons(
options=['True', 'False'],
value='False',
description='Plot grid points:',
disabled=False
)
| mit | -1,499,138,884,504,016,100 | 20.094828 | 93 | 0.609442 | false |
FSUgenomics/TFLOW | tflow/segments/BUSCO_Analysis.py | 1 | 22978 | #TFLOW Segment: Analyze FASTA File for Gene Recapture using BUSCO Benchmark Database
#
#Dan Stribling
#Florida State University
#Center for Genomics and Personalized Medicine
#Version 0.9, 04/20/2015
#Project URL: http://www.github.com/fsugenomics/tflow
import os.path
import sys
import subprocess
import shutil
import gzip
BUSCO_FILES = {'arthropoda':'BUSCO_Arthropoda.fas',
'vertebrata':'BUSCO_Vertebrata.fas',
'fungi':'BUSCO_Fungi.fas',
'metazoa':'BUSCO_Metazoa.fas'}
if __name__ == "__main__" or __package__ is None:
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../'))
import tflow.segments
__package__ = "tflow.segments"
from .parser_class import OutputParser
from ..util import (print_exit, print_except, write_file, write_report, delete_pid_file,
percent_string, lowercase, stop_TFLOW_process)
from .. import util
from .. import local_settings
if hasattr(local_settings, 'BUSCO_LOCATION'):
BUSCO_LOCATION = local_settings.BUSCO_LOCATION
else:
BUSCO_LOCATION = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
'sequence_files')
if hasattr(local_settings, 'BLAST_LOCATION'):
BLAST_LOCATION = local_settings.BLAST_LOCATION
else:
BLAST_LOCATION = ''
if hasattr(local_settings, 'BLAST_EXEC'):
BLAST_EXEC = local_settings.BLAST_EXEC
else:
BLAST_EXEC = os.path.join(BLAST_LOCATION, 'blastx')
if hasattr(local_settings, 'MAKE_BLAST_DB_LOCATION'):
MAKE_BLAST_DB_LOCATION = local_settings.MAKE_BLAST_DB_LOCATION
else:
MAKE_BLAST_DB_LOCATION = ''
if hasattr(local_settings, 'MAKE_BLAST_DB_EXEC'):
MAKE_BLAST_DB_EXEC = local_settings.MAKE_BLAST_DB_EXEC
else:
MAKE_BLAST_DB_EXEC = os.path.join(MAKE_BLAST_DB_LOCATION, 'makeblastdb')
JOB_TYPE = 'BUSCO_Analysis'
PROGRAM_URL = 'http://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download'
DATASET_DETAILS = ''' Benchmarking sets of Universal Single-Copy Orthologs (BUSCO): Metazoa, Arthropoda,
Vertebrata, and Fungi Datasets
Version: OrthoDB7, Acquired 2015-04-22
URL: ftp://cegg.unige.ch/OrthoDB7/BUSCO/
Citation: Waterhouse et al, Nucleic Acids Research, January 2013, PMID:23180791
OrthoDB: a hierarchical catalog of animal, fungal and bacterial orthologs.
'''
SEGMENT_FOR_VERSION = '2.2.29'
BLAST_COMMAND = BLAST_EXEC
BLAST_COMMAND_LIST = [BLAST_COMMAND]
BLAST_DB_COMMAND = MAKE_BLAST_DB_EXEC
BLAST_DB_COMMAND_LIST = [BLAST_DB_COMMAND]
TEST_COMMAND = '-h'
OUT_FILE = 'BUSCO_Analysis.out'
MILESTONES = ['BUSCO Benchmarking Analysis Complete']
TERMINAL_FLAGS = ['BUSCO Analysis Done']
FAILURE_FLAGS = ['Exiting Early...',
'Traceback',
'Not Found',
'Exception: ERROR',
]
DEFAULT_SETTINGS = {'working_directory':'BUSCO_Analysis',
'BUSCO_type':'vertebrata',
'BUSCO_location':BUSCO_LOCATION,
'copy_input_file':True,
'max_CPU':'4',
'evalue':'1e-5',
'evalue_cutoff':'1e-20',
'blast_result_file':'blast.out',
'print_missing_genes':False,
'print_matches':False,
#TFLOW BUSCO_Analysis Settings
'blast_command':BLAST_COMMAND,
'blast_command_list':BLAST_COMMAND_LIST,
'blast_db_command':BLAST_DB_COMMAND,
'blast_db_command_list':BLAST_DB_COMMAND_LIST,
'test_command':TEST_COMMAND,
'program_URL':PROGRAM_URL,
'segment_for_version':SEGMENT_FOR_VERSION,
'dataset_details':DATASET_DETAILS,
#TFLOW Writing Defaults, Used if Global Not Set
'write_report':True,
'write_command':True,
'write_pid':True,
}
REQUIRED_SETTINGS = ['blast_command_list', 'blast_db_command_list', 'working_directory',
'copy_input_file', 'evalue', 'max_CPU', 'blast_result_file', 'evalue_cutoff',
'print_missing_genes', 'write_command', 'write_report', 'write_pid',
'print_matches']
REQUIRED_ANALYSIS_SETTINGS = ['blast_result_file', 'evalue_cutoff', 'print_missing_genes',
'print_matches', 'write_report']
class Parser(OutputParser):
def set_local_defaults(self):
self.milestones = MILESTONES
self.terminal_flags = TERMINAL_FLAGS
self.failure_flags = FAILURE_FLAGS
self.job_type = JOB_TYPE
def check_done(options):
parser = Parser()
parser.out_file = options['out_file']
failure_exit = (options['mode'] in ['run', 'track'])
return parser.check_completion(failure_exit)
def track(options):
parser = Parser()
parser.out_file = options['out_file']
parser.track()
def read(options):
parser = Parser()
parser.out_file = options['out_file']
parser.read_or_notify()
def stop(options):
job_pid_file = os.path.join(options['working_directory'],
JOB_TYPE + '.auto.pid')
stop_TFLOW_process(job_pid_file, JOB_TYPE)
def clean(options):
files = ['BUSCO_Make_DB.auto.sh', 'BUSCO_tblastn.auto.sh']
for BUSCO_type in ['Arthropoda', 'Vertebrata', 'Metazoa', 'Fungi']:
for suffix in ['.pin', '.psq', '.phr']:
files.append('BUSCO_' + BUSCO_type + suffix)
if options['copy_input_file']:
for file_type in ['absolute_input_analysis_file', 'rel_input_analysis_file']:
if file_type in options:
files.append(os.path.basename(os.path.join(options['working_directory'],
options[file_type])))
break
out_files = [options['blast_result_file']]
remove_outfile = (options['mode'] == 'reset')
util.clean_TFLOW_auto_files(options['job_type'], options['project_directory'],
options['working_directory'], remove_outfile=remove_outfile,
confirm=options['confirm'], files=files, out_files=out_files)
def test(options, silent=False):
all_output = ''
for job_type, command_list in [(JOB_TYPE+':BLAST', 'blast_command_list'),
(JOB_TYPE+':Make_Blast_DB', 'blast_db_command_list')]:
try:
process = subprocess.Popen(options[command_list] + [options['test_command']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
process.wait()
output, error = process.communicate()
all_output += output
print ' -- %s Found!' % job_type
except OSError as error:
if silent:
return False
print ('%s Cannot Be Found ' % job_type
+ ' With Shell Command: "%s"' % ' '.join(options[command_list]))
if PROGRAM_URL:
print 'If Not Installed, %s Can be Downloaded From:\n%s' % (JOB_TYPE, PROGRAM_URL)
all_output += 'Error Number: %s\nError Text:\n%s' % (str(error.errno), error.strerror)
return all_output
def run(options):
if __name__ != '__main__' and options['is_pipe']:
out_file_stream = open(options['out_file'], 'w')
terminal_out, terminal_error = sys.stdout, sys.stderr
sys.stdout, sys.stderr = out_file_stream, out_file_stream
#Ensure Required Settings in Options
for required_option in REQUIRED_SETTINGS:
if required_option not in options:
print_exit('Required Option: %s for %s not given.' % (required_option, JOB_TYPE))
#Ensure A Type of Input File is Given
if not any(x in options for x in ['absolute_input_analysis_file',
'rel_input_analysis_file',
'result_name_file']):
print_exit('Either absolute_input_analysis_file, rel_input_analysis_file, or'
+ ' result_name_file parameter required.')
#Ensure a BUSCO file or type is given
if not any(x in options for x in ['BUSCO_file', 'BUSCO_type']):
print_exit('Either BUSCO_file or BUSCO_type paramater required.')
#Ensure Working Directory Exists
if not os.path.isdir(options['working_directory']):
print_exit('Working Directory: %s Not Found.' % options['working_directory'])
#Print Dataset Details
if 'dataset_details' in options:
print 'Details on Benchmarking Dataset:'
print options['dataset_details']
print ''
#Assign Correct Input File Name
if 'absolute_input_analysis_file' in options:
full_input_file = options['absolute_input_analysis_file']
input_file = os.path.basename(full_input_file)
elif 'rel_input_analysis_file' in options:
full_input_file = os.path.join(options['project_directory'],
options['rel_input_analysis_file'])
input_file = os.path.basename(options['rel_input_analysis_file'])
elif 'result_name_file' in options:
full_result_name_file = os.path.join(options['project_directory'],
options['result_name_file'])
if os.path.isfile(full_result_name_file):
print ('Reading Result Sequence File Name from Provided '
+ 'File: %s' % full_result_name_file )
else:
print_exit('Provided File: %s Containing' % full_result_name_file
+ ' Result Sequence File Name Not Found.')
rf = open(full_result_name_file, 'r')
full_input_file = rf.read().strip()
rf.close()
if os.path.isfile(full_input_file):
print 'Read Result Sequence File Name: %s' % full_input_file
print 'File Found!'
print ''
else:
print_exit('Cannot Find Read Result Sequence File: %s' % full_input_file)
input_file = os.path.basename(full_input_file)
#Find/Validate BUSCO File Selection
if 'BUSCO_file' in options:
full_BUSCO_file_name = options['BUSCO_file']
print 'BUSCO File: %s Given.' % options['BUSCO_file']
elif 'BUSCO_type' in options:
if 'BUSCO_location' not in options or not options['BUSCO_location']:
print_exit('BUSCO_type: %s Given ' % options['BUSCO_type']
+ 'but BUSCO_location not given.')
if not os.path.isdir(options['BUSCO_location']):
print_exit('BUSCO File Location: %s Not Found.' % options['BUSCO_location'])
BUSCO_type = lowercase(options['BUSCO_type'])
if BUSCO_type in BUSCO_FILES:
print 'BUSCO File Type: %s Provided.' % BUSCO_type
full_BUSCO_file_name = os.path.join(options['BUSCO_location'],
BUSCO_FILES[BUSCO_type])
else:
print_exit([('Selected BUSCO Type: %s Not Available.' % BUSCO_type),
'Please Select from Types:', ', '.join(BUSCO_FILES.keys())])
#If Selected BUSCO File Not Yet Unzipped, Unzip it
if not os.path.isfile(full_BUSCO_file_name) and os.path.isfile(full_BUSCO_file_name +'.gz'):
print ('\nSelected BUSCO File: %s' % full_BUSCO_file_name
+ 'Found in Zipped Format: %s' % full_BUSCO_file_name + '.gz')
print 'Unzipping...'
print ''
sys.stdout.flush()
with gzip.open(full_BUSCO_file_name + '.gz', 'r') as zipped_BUSCO, \
open(full_BUSCO_file_name, 'w') as unzipped_BUSCO:
unzipped_BUSCO.writelines(zipped_BUSCO)
#Ensure Provided/Selected BUSCO File Exists
if os.path.isfile(full_BUSCO_file_name):
print 'Selected BUSCO File Found: %s' % full_BUSCO_file_name
if 'BUSCO_file' not in options:
options['BUSCO_file'] = full_BUSCO_file_name
else:
print_exit('Selected BUSCO File: %s Cannot Be Found.' % full_BUSCO_file_name)
#Check that Input File Exists
if not os.path.isfile(full_input_file):
print_exit('Input File: %s Not Found.' % full_input_file)
#If Selected, Copy Input File to Working Directory
if options['copy_input_file']:
print ('Copying Input File: %s' % input_file
+ ' to Working Directory: %s' % options['working_directory'])
working_input_file = os.path.join(options['working_directory'], input_file)
shutil.copyfile(full_input_file, working_input_file)
if not os.path.isfile(working_input_file):
print_exit('Copying of File: %s to Name: %s Unsuccessful.' % (full_input_file,
working_input_file))
else:
print 'Using Input File: %s' % full_input_file
working_input_file = full_input_file
#Prepare Blast Database Name
if 'BUSCO_type' in options:
title='BUSCO_' + options['BUSCO_type'].title()
else:
BUSCO_file_name = os.path.basename(options['BUSCO_file'])
if BUSCO_file_name in BUSCO_FILES.values():
for name in BUSCO_FILES:
if BUSCO_file_name == BUSCO_FILES[name]:
title = 'BUSCO_' + name.title()
break
else:
title = 'BUSCO'
#Prepare Blast Database
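# The assembled command is roughly (illustrative; actual paths depend on settings):
# makeblastdb -in BUSCO_Vertebrata.fas -dbtype prot -title BUSCO_Vertebrata -out BUSCO_Vertebrata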
db_command_list = options['blast_db_command_list'][:]
db_command_list += ['-in', full_BUSCO_file_name, '-dbtype', 'prot', '-title', title,
'-out', title]
db_command = ' '.join(db_command_list)
if options['write_command']:
command_file = os.path.join(options['working_directory'],
'BUSCO_Make_DB.auto.sh')
write_file(command_file, '#!/bin/sh\n' + db_command)
print ''
print 'Running Command:\n ' + db_command
sys.stdout.flush()
try:
process = subprocess.Popen(db_command_list, stdout=sys.stdout, stderr=sys.stderr,
cwd=options['working_directory'])
if options['write_pid']:
pid_file_name = os.path.join(options['working_directory'],
options['job_type'] + '.auto.pid')
write_file(pid_file_name, str(process.pid))
process.wait()
if options['write_pid']:
delete_pid_file(pid_file_name)
sys.stdout.flush()
except KeyboardInterrupt:
if __name__ != '__main__' and options['is_pipe']:
sys.stdout, sys.stderr = terminal_out, terminal_error
out_file_stream.close()
print 'Killing %s Process.' % JOB_TYPE
process.kill()
raise
#Prepare BLAST Sequence Comparison Command
command_list = list(options['blast_command_list'])
command_list += ['-db', title, '-query', full_input_file, '-outfmt', '6', '-evalue',
options['evalue'], '-num_threads', options['max_CPU'], '-out',
options['blast_result_file']]
command = ' '.join(command_list)
#If Selected, Write Command to File
if options['write_command']:
command_file = os.path.join(options['working_directory'], 'BUSCO_blastx.auto.sh')
write_file(command_file, '#!/bin/sh\n' + command)
#Perform BLAST Sequence Comparisons
print ''
print 'Running Command:\n ' + command
sys.stdout.flush()
try:
process = subprocess.Popen(command_list, stdout=sys.stdout, stderr=sys.stderr,
cwd=options['working_directory'])
if options['write_pid']:
pid_file_name = os.path.join(options['working_directory'],
options['job_type'] + '.auto.pid')
write_file(pid_file_name, str(process.pid))
process.wait()
if options['write_pid']:
delete_pid_file(pid_file_name)
sys.stdout.flush()
except KeyboardInterrupt:
if __name__ != '__main__' and options['is_pipe']:
sys.stdout, sys.stderr = terminal_out, terminal_error
out_file_stream.close()
print 'Killing %s Process.' % JOB_TYPE
process.kill()
raise
print ''
print 'Blast Completed with Out File: %s' % options['blast_result_file']
print ''
analyze(options)
print ''
print 'BUSCO Benchmarking Analysis Complete'
if __name__ != '__main__' and options['is_pipe']:
sys.stdout, sys.stderr = terminal_out, terminal_error
out_file_stream.close()
#Analyze Results of Sequence Comparison
def analyze(options):
analysis = 'Analyzing BUSCO Recapture BLAST Result.\n\n'
#Ensure Required Settings in Options
for required_option in REQUIRED_ANALYSIS_SETTINGS:
if required_option not in options:
print_exit('Required Option: %s for %s not given.' % (required_option, JOB_TYPE))
#Ensure a BUSCO file or type is given
if not any(x in options for x in ['BUSCO_file', 'BUSCO_type']):
print_exit('Either BUSCO_file or BUSCO_type parameter required.')
#Ensure Working Directory Exists
if not os.path.isdir(options['working_directory']):
print_exit('Working Directory: %s Not Found.' % options['working_directory'])
#Find/Validate BUSCO File Selection
if 'BUSCO_file' in options:
full_BUSCO_file_name = options['BUSCO_file']
print 'BUSCO File: %s Given.' % options['BUSCO_file']
elif 'BUSCO_type' in options:
if 'BUSCO_location' not in options or not options['BUSCO_location']:
print_exit('BUSCO_type: %s Given ' % options['BUSCO_type']
+ 'but BUSCO_location not given.')
if not os.path.isdir(options['BUSCO_location']):
print_exit('BUSCO File Location: %s Not Found.' % options['BUSCO_location'])
BUSCO_type = lowercase(options['BUSCO_type'])
if BUSCO_type in BUSCO_FILES:
print 'BUSCO File Type: %s Provided.' % BUSCO_type
full_BUSCO_file_name = os.path.join(options['BUSCO_location'],
BUSCO_FILES[BUSCO_type])
else:
print_exit([('Selected BUSCO Type: %s Not Available.' % BUSCO_type),
'Please Select from Types:', ', '.join(BUSCO_FILES.keys())])
#Ensure Provided/Selected BUSCO File Exists
if os.path.isfile(full_BUSCO_file_name):
print 'Selected BUSCO File Found: %s' % full_BUSCO_file_name
else:
print_exit('Selected BUSCO File: %s Cannot Be Found.' % full_BUSCO_file_name)
full_blast = os.path.join(options['working_directory'], options['blast_result_file'])
#Ensure Blast Output File Exists
if not os.path.isfile(full_blast):
print_exit('Blast Result File: %s Not Found.' % full_blast)
analysis = '\nAnalyzing Blast Result File %s\n' % full_blast
analysis += ' With BUSCO file: %s\n' % full_BUSCO_file_name
#Read Expected Genes
BUSCO_sequences = {}
genes = {}
BUSCO_file = open(full_BUSCO_file_name, 'r')
for line in BUSCO_file:
if line.startswith('>'):
split_line = line.lstrip('>').split()
sequence = split_line[0]
gene = split_line[1]
BUSCO_sequences[sequence] = gene
genes[gene] = False
BUSCO_file.close()
expected_gene_count = len(genes)
analysis += '\nExpected Genes: %i\n' % expected_gene_count
cutoff_float = float(options['evalue_cutoff'])
#Read Blast File Outputs and Count Genes Found Over Threshold
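# The blast output is tabular (-outfmt 6); only column 1 (query id), column 2 (BUSCO
# subject id) and column 11 (e-value) are used below.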
blast_file = open(full_blast, 'r')
for (line_number, line) in enumerate(blast_file, start=1):
split_line = line.split()
if not split_line:
print_exit('Blank Line Found in Blast Results File at Line Number %i' % line_number)
elif len(split_line) < 11:
print_exit([('Problem with formatting of line number %i ' % line_number
+ 'in blast results file: %s' % full_blast), 'Line:', line.strip()])
sequence = split_line[0]
BUSCO_sequence = split_line[1]
if BUSCO_sequence in BUSCO_sequences:
gene = BUSCO_sequences[BUSCO_sequence]
else:
print_except(['Unexpected BUSCO Sequence Hit: %s Found.' % BUSCO_sequence,
'Cannot Identify Gene.'])
e_score_string = split_line[10]
e_score = float(e_score_string)
#Mark Gene as Present if Hit Exists over Threshold Value
if e_score <= cutoff_float:
if options['print_matches'] and not genes[gene]:
analysis += 'Match: %s %s %s %s\n' % (sequence, BUSCO_sequence, gene, e_score_string)
genes[gene] = True
#Count Number of Found and Missing Genes
found_gene_count = 0
missing_genes = []
for gene in genes:
if genes[gene]:
found_gene_count += 1
else:
missing_genes.append(gene)
missing_gene_count = len(missing_genes)
#Ensure that Found/Missing Genes Sums to Expected Total
if missing_gene_count + found_gene_count != expected_gene_count:
print_except('PROBLEM!, Found: %i + ' % found_gene_count
+ 'Missing: %i Genes != Expected: %i' % (missing_gene_count,
expected_gene_count))
#Report Results
analysis += 'Genes Found: %i\n' % found_gene_count
analysis += 'Genes Missing: %i\n' % missing_gene_count
if options['print_missing_genes'] and missing_genes:
analysis += 'Missing Genes: ' + ' '.join(missing_genes) + '\n'
percent = percent_string(found_gene_count, expected_gene_count)
analysis += 'Percent BUSCO Genes Present: %s\n' % percent
headers = ['Analys.', 'Cutoff', 'Expect.', 'Found', 'Missing', 'Total', 'Percent']
data_grid = ['BUSCO', options['evalue_cutoff'], expected_gene_count, found_gene_count,
missing_gene_count, expected_gene_count, percent]
formatted_data = [str(x) for x in data_grid]
analysis += '\n'
analysis += 'Tab Separated Output:\n'
analysis += '\t'.join(headers) + '\n'
analysis += '\t'.join(formatted_data) + '\n'
report_dict = dict(zip(headers, formatted_data))
report_dict['report_type'] = 'recapture'
#If Selected, Write Analysis Report
if options['write_report']:
report_file = os.path.join(options['working_directory'],
JOB_TYPE + '.report')
write_report(report_file, report_dict)
print analysis
return analysis
| gpl-2.0 | -9,137,028,453,921,034,000 | 39.813499 | 104 | 0.581817 | false |
infinity0n3/python-fabtotum | fabtotum/loaders/gerber/excellon_statements.py | 1 | 31011 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# copyright 2014 Hamilton Kibbe <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Excellon Statements
====================
**Excellon file statement classes**
"""
import re
import uuid
from .utils import (parse_gerber_value, write_gerber_value, decimal_string,
inch, metric)
__all__ = ['ExcellonTool', 'ToolSelectionStmt', 'CoordinateStmt',
'CommentStmt', 'HeaderBeginStmt', 'HeaderEndStmt',
'RewindStopStmt', 'EndOfProgramStmt', 'UnitStmt',
'IncrementalModeStmt', 'VersionStmt', 'FormatStmt', 'LinkToolStmt',
'MeasuringModeStmt', 'RouteModeStmt', 'LinearModeStmt', 'DrillModeStmt',
'AbsoluteModeStmt', 'RepeatHoleStmt', 'UnknownStmt',
'ExcellonStatement', 'ZAxisRoutPositionStmt',
'RetractWithClampingStmt', 'RetractWithoutClampingStmt',
'CutterCompensationOffStmt', 'CutterCompensationLeftStmt',
'CutterCompensationRightStmt', 'ZAxisInfeedRateStmt',
'NextToolSelectionStmt', 'SlotStmt']
class ExcellonStatement(object):
""" Excellon Statement abstract base class
"""
@classmethod
def from_excellon(cls, line):
raise NotImplementedError('from_excellon must be implemented in a '
'subclass')
def __init__(self, unit='inch', id=None):
self.units = unit
self.id = uuid.uuid4().int if id is None else id
def to_excellon(self, settings=None):
raise NotImplementedError('to_excellon must be implemented in a '
'subclass')
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
def offset(self, x_offset=0, y_offset=0):
pass
def __eq__(self, other):
return self.__dict__ == other.__dict__
class ExcellonTool(ExcellonStatement):
""" Excellon Tool class
Parameters
----------
settings : FileSettings (dict-like)
File-wide settings.
kwargs : dict-like
Tool settings from the excellon statement. Valid keys are:
- `diameter` : Tool diameter [expressed in file units]
- `rpm` : Tool RPM
- `feed_rate` : Z-axis tool feed rate
- `retract_rate` : Z-axis tool retraction rate
- `max_hit_count` : Number of hits allowed before a tool change
- `depth_offset` : Offset of tool depth from tip of tool.
Attributes
----------
number : integer
Tool number from the excellon file
diameter : float
Tool diameter in file units
rpm : float
Tool RPM
feed_rate : float
Tool Z-axis feed rate.
retract_rate : float
Tool Z-axis retract rate
depth_offset : float
Offset of depth measurement from tip of tool
max_hit_count : integer
Maximum number of tool hits allowed before a tool change
hit_count : integer
Number of tool hits in excellon file.
"""
PLATED_UNKNOWN = None
PLATED_YES = 'plated'
PLATED_NO = 'nonplated'
PLATED_OPTIONAL = 'optional'
@classmethod
def from_tool(cls, tool):
args = {}
args['depth_offset'] = tool.depth_offset
args['diameter'] = tool.diameter
args['feed_rate'] = tool.feed_rate
args['max_hit_count'] = tool.max_hit_count
args['number'] = tool.number
args['plated'] = tool.plated
args['retract_rate'] = tool.retract_rate
args['rpm'] = tool.rpm
return cls(None, **args)
@classmethod
def from_excellon(cls, line, settings, id=None, plated=None):
""" Create a Tool from an excellon file tool definition line.
Parameters
----------
line : string
Tool definition line from an excellon file.
settings : FileSettings (dict-like)
Excellon file-wide settings
Returns
-------
tool : Tool
An ExcellonTool representing the tool defined in `line`
"""
commands = re.split('([BCFHSTZ])', line)[1:]
commands = [(command, value) for command, value in pairwise(commands)]
args = {}
args['id'] = id
nformat = settings.format
zero_suppression = settings.zero_suppression
for cmd, val in commands:
if cmd == 'B':
args['retract_rate'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'C':
args['diameter'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'F':
args['feed_rate'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'H':
args['max_hit_count'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'S':
args['rpm'] = 1000 * parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'T':
args['number'] = int(val)
elif cmd == 'Z':
args['depth_offset'] = parse_gerber_value(val, nformat, zero_suppression)
if plated != ExcellonTool.PLATED_UNKNOWN:
            # Sometimes we can parse the plating status
args['plated'] = plated
return cls(settings, **args)
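    # Illustrative sketch (not from the original source): parsing a header
    # tool-definition line, where ``settings`` stands for the FileSettings-like
    # object (with ``format`` and ``zero_suppression``) used throughout this
    # module.
    #
    #   tool = ExcellonTool.from_excellon('T01C0.0236', settings)
    #   tool.number    # -> 1
    #   tool.diameter  # -> 0.0236 (explicit decimal values are typically
    #                  #    taken verbatim by parse_gerber_value)
    #
    # Feed rate (F), spindle speed (S), retract rate (B), hit count (H) and
    # depth offset (Z) fields are parsed the same way, using the file's
    # number format.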
@classmethod
def from_dict(cls, settings, tool_dict):
""" Create an ExcellonTool from a dict.
Parameters
----------
settings : FileSettings (dict-like)
Excellon File-wide settings
tool_dict : dict
Excellon tool parameters as a dict
Returns
-------
tool : ExcellonTool
An ExcellonTool initialized with the parameters in tool_dict.
"""
return cls(settings, **tool_dict)
def __init__(self, settings, **kwargs):
if kwargs.get('id') is not None:
super(ExcellonTool, self).__init__(id=kwargs.get('id'))
self.settings = settings
self.number = kwargs.get('number')
self.feed_rate = kwargs.get('feed_rate')
self.retract_rate = kwargs.get('retract_rate')
self.rpm = kwargs.get('rpm')
self.diameter = kwargs.get('diameter')
self.max_hit_count = kwargs.get('max_hit_count')
self.depth_offset = kwargs.get('depth_offset')
self.plated = kwargs.get('plated')
self.hit_count = 0
def to_excellon(self, settings=None):
if self.settings and not settings:
settings = self.settings
fmt = settings.format
zs = settings.zero_suppression
stmt = 'T%02d' % self.number
if self.retract_rate is not None:
stmt += 'B%s' % write_gerber_value(self.retract_rate, fmt, zs)
if self.feed_rate is not None:
stmt += 'F%s' % write_gerber_value(self.feed_rate, fmt, zs)
if self.max_hit_count is not None:
stmt += 'H%s' % write_gerber_value(self.max_hit_count, fmt, zs)
if self.rpm is not None:
if self.rpm < 100000.:
stmt += 'S%s' % write_gerber_value(self.rpm / 1000., fmt, zs)
else:
stmt += 'S%g' % (self.rpm / 1000.)
if self.diameter is not None:
stmt += 'C%s' % decimal_string(self.diameter, fmt[1], True)
if self.depth_offset is not None:
stmt += 'Z%s' % write_gerber_value(self.depth_offset, fmt, zs)
return stmt
def to_inch(self):
if self.settings.units != 'inch':
self.settings.units = 'inch'
if self.diameter is not None:
self.diameter = inch(self.diameter)
def to_metric(self):
if self.settings.units != 'metric':
self.settings.units = 'metric'
if self.diameter is not None:
self.diameter = metric(self.diameter)
def _hit(self):
self.hit_count += 1
def equivalent(self, other):
"""
Is the other tool equal to this, ignoring the tool number, and other file specified properties
"""
if type(self) != type(other):
return False
return (self.diameter == other.diameter
and self.feed_rate == other.feed_rate
and self.retract_rate == other.retract_rate
and self.rpm == other.rpm
and self.depth_offset == other.depth_offset
and self.max_hit_count == other.max_hit_count
and self.plated == other.plated
and self.settings.units == other.settings.units)
def __repr__(self):
unit = 'in.' if self.settings.units == 'inch' else 'mm'
fmtstr = '<ExcellonTool %%02d: %%%d.%dg%%s dia.>' % self.settings.format
return fmtstr % (self.number, self.diameter, unit)
class ToolSelectionStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
""" Create a ToolSelectionStmt from an excellon file line.
Parameters
----------
line : string
Line from an Excellon file
Returns
-------
tool_statement : ToolSelectionStmt
            ToolSelectionStmt representation of `line`.
"""
line = line[1:]
compensation_index = None
        # up to 3 characters for tool number (Fritzing uses that)
if len(line) <= 3:
tool = int(line)
else:
tool = int(line[:2])
compensation_index = int(line[2:])
return cls(tool, compensation_index, **kwargs)
def __init__(self, tool, compensation_index=None, **kwargs):
super(ToolSelectionStmt, self).__init__(**kwargs)
tool = int(tool)
compensation_index = (int(compensation_index) if compensation_index
is not None else None)
self.tool = tool
self.compensation_index = compensation_index
def to_excellon(self, settings=None):
stmt = 'T%02d' % self.tool
if self.compensation_index is not None:
stmt += '%02d' % self.compensation_index
return stmt
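    # Sketch of round-tripping tool-selection lines (values are illustrative):
    #
    #   ToolSelectionStmt.from_excellon('T02').tool                  # -> 2
    #   ToolSelectionStmt.from_excellon('T0203').compensation_index  # -> 3
    #   ToolSelectionStmt(2).to_excellon()                           # -> 'T02'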
class NextToolSelectionStmt(ExcellonStatement):
# TODO the statement exists outside of the context of the file,
    # so it is impossible to know that it is really the next tool
def __init__(self, cur_tool, next_tool, **kwargs):
"""
Select the next tool in the wheel.
Parameters
----------
cur_tool : the tool that is currently selected
        next_tool : the tool that is now selected
"""
super(NextToolSelectionStmt, self).__init__(**kwargs)
self.cur_tool = cur_tool
self.next_tool = next_tool
def to_excellon(self, settings=None):
stmt = 'M00'
return stmt
class ZAxisInfeedRateStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
""" Create a ZAxisInfeedRate from an excellon file line.
Parameters
----------
line : string
Line from an Excellon file
Returns
-------
        z_axis_infeed_rate : ZAxisInfeedRateStmt
            ZAxisInfeedRateStmt representation of `line`.
"""
rate = int(line[1:])
return cls(rate, **kwargs)
def __init__(self, rate, **kwargs):
super(ZAxisInfeedRateStmt, self).__init__(**kwargs)
self.rate = rate
def to_excellon(self, settings=None):
return 'F%02d' % self.rate
class CoordinateStmt(ExcellonStatement):
@classmethod
def from_point(cls, point, mode=None):
stmt = cls(point[0], point[1])
if mode:
stmt.mode = mode
return stmt
@classmethod
def from_excellon(cls, line, settings, **kwargs):
x_coord = None
y_coord = None
if line[0] == 'X':
splitline = line.strip('X').split('Y')
x_coord = parse_gerber_value(splitline[0], settings.format,
settings.zero_suppression)
if len(splitline) == 2:
y_coord = parse_gerber_value(splitline[1], settings.format,
settings.zero_suppression)
else:
y_coord = parse_gerber_value(line.strip(' Y'), settings.format,
settings.zero_suppression)
c = cls(x_coord, y_coord, **kwargs)
c.units = settings.units
return c
def __init__(self, x=None, y=None, **kwargs):
super(CoordinateStmt, self).__init__(**kwargs)
self.x = x
self.y = y
self.mode = None
def to_excellon(self, settings):
stmt = ''
if self.mode == "ROUT":
stmt += "G00"
if self.mode == "LINEAR":
stmt += "G01"
if self.x is not None:
stmt += 'X%s' % write_gerber_value(self.x, settings.format,
settings.zero_suppression)
if self.y is not None:
stmt += 'Y%s' % write_gerber_value(self.y, settings.format,
settings.zero_suppression)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x is not None:
self.x = inch(self.x)
if self.y is not None:
self.y = inch(self.y)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x is not None:
self.x = metric(self.x)
if self.y is not None:
self.y = metric(self.y)
def offset(self, x_offset=0, y_offset=0):
if self.x is not None:
self.x += x_offset
if self.y is not None:
self.y += y_offset
def __str__(self):
coord_str = ''
if self.x is not None:
coord_str += 'X: %g ' % self.x
if self.y is not None:
coord_str += 'Y: %g ' % self.y
return '<Coordinate Statement: %s>' % coord_str
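    # Illustrative sketch: a coordinate can be built straight from a point and
    # tagged with a mode, which controls the G-code prefix emitted above.
    #
    #   stmt = CoordinateStmt.from_point((1.25, 2.5), mode="LINEAR")
    #   stmt.to_excellon(settings)   # -> 'G01X...Y...'; the digits depend on
    #                                #    the file's format/zero suppression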
class RepeatHoleStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, settings, **kwargs):
match = re.compile(r'R(?P<rcount>[0-9]*)X?(?P<xdelta>[+\-]?\d*\.?\d*)?Y?'
'(?P<ydelta>[+\-]?\d*\.?\d*)?').match(line)
stmt = match.groupdict()
count = int(stmt['rcount'])
xdelta = (parse_gerber_value(stmt['xdelta'], settings.format,
settings.zero_suppression)
                  if stmt['xdelta'] != '' else None)
ydelta = (parse_gerber_value(stmt['ydelta'], settings.format,
settings.zero_suppression)
                  if stmt['ydelta'] != '' else None)
c = cls(count, xdelta, ydelta, **kwargs)
c.units = settings.units
return c
def __init__(self, count, xdelta=0.0, ydelta=0.0, **kwargs):
super(RepeatHoleStmt, self).__init__(**kwargs)
self.count = count
self.xdelta = xdelta
self.ydelta = ydelta
def to_excellon(self, settings):
stmt = 'R%d' % self.count
if self.xdelta is not None and self.xdelta != 0.0:
stmt += 'X%s' % write_gerber_value(self.xdelta, settings.format,
settings.zero_suppression)
if self.ydelta is not None and self.ydelta != 0.0:
stmt += 'Y%s' % write_gerber_value(self.ydelta, settings.format,
settings.zero_suppression)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.xdelta is not None:
self.xdelta = inch(self.xdelta)
if self.ydelta is not None:
self.ydelta = inch(self.ydelta)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.xdelta is not None:
self.xdelta = metric(self.xdelta)
if self.ydelta is not None:
self.ydelta = metric(self.ydelta)
def __str__(self):
return '<Repeat Hole: %d times, offset X: %g Y: %g>' % (
self.count,
self.xdelta if self.xdelta is not None else 0,
self.ydelta if self.ydelta is not None else 0)
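    # Illustrative sketch: a line such as 'R4X0.5' repeats the previous hit 4
    # times, stepping in X by the parsed delta (0.5 file units here, assuming
    # the explicit decimal is taken verbatim); the absent Y delta stays None.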
class CommentStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
return cls(line.lstrip(';'))
def __init__(self, comment, **kwargs):
super(CommentStmt, self).__init__(**kwargs)
self.comment = comment
def to_excellon(self, settings=None):
return ';%s' % self.comment
class HeaderBeginStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(HeaderBeginStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M48'
class HeaderEndStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(HeaderEndStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M95'
class RewindStopStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RewindStopStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return '%'
class ZAxisRoutPositionStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(ZAxisRoutPositionStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M15'
class RetractWithClampingStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RetractWithClampingStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M16'
class RetractWithoutClampingStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RetractWithoutClampingStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M17'
class CutterCompensationOffStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(CutterCompensationOffStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G40'
class CutterCompensationLeftStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(CutterCompensationLeftStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G41'
class CutterCompensationRightStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(CutterCompensationRightStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G42'
class EndOfProgramStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, settings, **kwargs):
match = re.compile(r'M30X?(?P<x>\d*\.?\d*)?Y?'
'(?P<y>\d*\.?\d*)?').match(line)
stmt = match.groupdict()
x = (parse_gerber_value(stmt['x'], settings.format,
settings.zero_suppression)
             if stmt['x'] != '' else None)
y = (parse_gerber_value(stmt['y'], settings.format,
settings.zero_suppression)
             if stmt['y'] != '' else None)
c = cls(x, y, **kwargs)
c.units = settings.units
return c
def __init__(self, x=None, y=None, **kwargs):
super(EndOfProgramStmt, self).__init__(**kwargs)
self.x = x
self.y = y
def to_excellon(self, settings=None):
stmt = 'M30'
if self.x is not None:
stmt += 'X%s' % write_gerber_value(self.x)
if self.y is not None:
stmt += 'Y%s' % write_gerber_value(self.y)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x is not None:
self.x = inch(self.x)
if self.y is not None:
self.y = inch(self.y)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x is not None:
self.x = metric(self.x)
if self.y is not None:
self.y = metric(self.y)
def offset(self, x_offset=0, y_offset=0):
if self.x is not None:
self.x += x_offset
if self.y is not None:
self.y += y_offset
class UnitStmt(ExcellonStatement):
@classmethod
def from_settings(cls, settings):
"""Create the unit statement from the FileSettings"""
return cls(settings.units, settings.zeros)
@classmethod
def from_excellon(cls, line, **kwargs):
units = 'inch' if 'INCH' in line else 'metric'
zeros = 'leading' if 'LZ' in line else 'trailing'
if '0000.00' in line:
format = (4, 2)
elif '000.000' in line:
format = (3, 3)
elif '00.0000' in line:
format = (2, 4)
else:
format = None
return cls(units, zeros, format, **kwargs)
def __init__(self, units='inch', zeros='leading', format=None, **kwargs):
super(UnitStmt, self).__init__(**kwargs)
self.units = units.lower()
self.zeros = zeros
self.format = format
def to_excellon(self, settings=None):
# TODO This won't export the invalid format statement if it exists
stmt = '%s,%s' % ('INCH' if self.units == 'inch' else 'METRIC',
'LZ' if self.zeros == 'leading'
else 'TZ')
return stmt
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
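    # Sketch (illustrative): a header line like 'METRIC,LZ,0000.00' parses to
    # units='metric', zeros='leading' and format=(4, 2), while to_excellon()
    # writes back only 'METRIC,LZ' (see the TODO above about the format part).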
class IncrementalModeStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
return cls('off', **kwargs) if 'OFF' in line else cls('on', **kwargs)
def __init__(self, mode='off', **kwargs):
super(IncrementalModeStmt, self).__init__(**kwargs)
if mode.lower() not in ['on', 'off']:
raise ValueError('Mode may be "on" or "off"')
self.mode = mode
def to_excellon(self, settings=None):
return 'ICI,%s' % ('OFF' if self.mode == 'off' else 'ON')
class VersionStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
version = int(line.split(',')[1])
return cls(version, **kwargs)
def __init__(self, version=1, **kwargs):
super(VersionStmt, self).__init__(**kwargs)
version = int(version)
if version not in [1, 2]:
raise ValueError('Valid versions are 1 or 2')
self.version = version
def to_excellon(self, settings=None):
return 'VER,%d' % self.version
class FormatStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
fmt = int(line.split(',')[1])
return cls(fmt, **kwargs)
def __init__(self, format=1, **kwargs):
super(FormatStmt, self).__init__(**kwargs)
format = int(format)
if format not in [1, 2]:
raise ValueError('Valid formats are 1 or 2')
self.format = format
def to_excellon(self, settings=None):
return 'FMAT,%d' % self.format
@property
def format_tuple(self):
return (self.format, 6 - self.format)
class LinkToolStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
linked = [int(tool) for tool in line.split('/')]
return cls(linked, **kwargs)
def __init__(self, linked_tools, **kwargs):
super(LinkToolStmt, self).__init__(**kwargs)
self.linked_tools = [int(x) for x in linked_tools]
def to_excellon(self, settings=None):
return '/'.join([str(x) for x in self.linked_tools])
class MeasuringModeStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
if not ('M71' in line or 'M72' in line):
raise ValueError('Not a measuring mode statement')
return cls('inch', **kwargs) if 'M72' in line else cls('metric', **kwargs)
def __init__(self, units='inch', **kwargs):
super(MeasuringModeStmt, self).__init__(**kwargs)
units = units.lower()
if units not in ['inch', 'metric']:
raise ValueError('units must be "inch" or "metric"')
self.units = units
def to_excellon(self, settings=None):
return 'M72' if self.units == 'inch' else 'M71'
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
class RouteModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RouteModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G00'
class LinearModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(LinearModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G01'
class DrillModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(DrillModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G05'
class AbsoluteModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(AbsoluteModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G90'
class UnknownStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
return cls(line, **kwargs)
def __init__(self, stmt, **kwargs):
super(UnknownStmt, self).__init__(**kwargs)
self.stmt = stmt
def to_excellon(self, settings=None):
return self.stmt
def __str__(self):
return "<Unknown Statement: %s>" % self.stmt
class SlotStmt(ExcellonStatement):
"""
G85 statement. Defines a slot created by multiple drills between two specified points.
    Format is two coordinates, split by G85 in the middle, for example, XnYnG85XnYn
"""
@classmethod
def from_points(cls, start, end):
return cls(start[0], start[1], end[0], end[1])
@classmethod
def from_excellon(cls, line, settings, **kwargs):
# Split the line based on the G85 separator
sub_coords = line.split('G85')
(x_start_coord, y_start_coord) = SlotStmt.parse_sub_coords(sub_coords[0], settings)
(x_end_coord, y_end_coord) = SlotStmt.parse_sub_coords(sub_coords[1], settings)
# Some files seem to specify only one of the coordinates
        if x_end_coord is None:
            x_end_coord = x_start_coord
        if y_end_coord is None:
            y_end_coord = y_start_coord
c = cls(x_start_coord, y_start_coord, x_end_coord, y_end_coord, **kwargs)
c.units = settings.units
return c
@staticmethod
def parse_sub_coords(line, settings):
x_coord = None
y_coord = None
if line[0] == 'X':
splitline = line.strip('X').split('Y')
x_coord = parse_gerber_value(splitline[0], settings.format,
settings.zero_suppression)
if len(splitline) == 2:
y_coord = parse_gerber_value(splitline[1], settings.format,
settings.zero_suppression)
else:
y_coord = parse_gerber_value(line.strip(' Y'), settings.format,
settings.zero_suppression)
return (x_coord, y_coord)
def __init__(self, x_start=None, y_start=None, x_end=None, y_end=None, **kwargs):
super(SlotStmt, self).__init__(**kwargs)
self.x_start = x_start
self.y_start = y_start
self.x_end = x_end
self.y_end = y_end
self.mode = None
def to_excellon(self, settings):
stmt = ''
if self.x_start is not None:
stmt += 'X%s' % write_gerber_value(self.x_start, settings.format,
settings.zero_suppression)
if self.y_start is not None:
stmt += 'Y%s' % write_gerber_value(self.y_start, settings.format,
settings.zero_suppression)
stmt += 'G85'
if self.x_end is not None:
stmt += 'X%s' % write_gerber_value(self.x_end, settings.format,
settings.zero_suppression)
if self.y_end is not None:
stmt += 'Y%s' % write_gerber_value(self.y_end, settings.format,
settings.zero_suppression)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x_start is not None:
self.x_start = inch(self.x_start)
if self.y_start is not None:
self.y_start = inch(self.y_start)
if self.x_end is not None:
self.x_end = inch(self.x_end)
if self.y_end is not None:
self.y_end = inch(self.y_end)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x_start is not None:
self.x_start = metric(self.x_start)
if self.y_start is not None:
self.y_start = metric(self.y_start)
if self.x_end is not None:
self.x_end = metric(self.x_end)
if self.y_end is not None:
self.y_end = metric(self.y_end)
def offset(self, x_offset=0, y_offset=0):
if self.x_start is not None:
self.x_start += x_offset
if self.y_start is not None:
self.y_start += y_offset
if self.x_end is not None:
self.x_end += x_offset
if self.y_end is not None:
self.y_end += y_offset
def __str__(self):
start_str = ''
if self.x_start is not None:
start_str += 'X: %g ' % self.x_start
if self.y_start is not None:
start_str += 'Y: %g ' % self.y_start
end_str = ''
if self.x_end is not None:
end_str += 'X: %g ' % self.x_end
if self.y_end is not None:
end_str += 'Y: %g ' % self.y_end
return '<Slot Statement: %s to %s>' % (start_str, end_str)
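    # Illustrative sketch: a slot line such as
    # 'X0044550Y0085125G85X0052950Y0085125' splits on 'G85' into start and end
    # coordinate pairs, and to_excellon(settings) writes the two pairs back out
    # with 'G85' between them; exact digits depend on the format settings.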
def pairwise(iterator):
""" Iterate over list taking two elements at a time.
e.g. [1, 2, 3, 4, 5, 6] ==> [(1, 2), (3, 4), (5, 6)]
"""
itr = iter(iterator)
while True:
yield tuple([next(itr) for i in range(2)])
| gpl-3.0 | 4,613,957,387,060,365,000 | 30.708589 | 102 | 0.557447 | false |
gmimano/commcaretest | corehq/apps/export/custom_export_helpers.py | 1 | 14645 | import json
from corehq.apps.reports.standard import export
from corehq.apps.reports.models import FormExportSchema, HQGroupExportConfiguration, CaseExportSchema
from corehq.apps.reports.standard.export import DeidExportReport
from couchexport.models import ExportTable, ExportSchema, ExportColumn
from django.utils.translation import ugettext as _
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.commtrack.models import StockExportColumn
from corehq.apps.domain.models import Domain
USERNAME_TRANSFORM = 'corehq.apps.export.transforms.user_id_to_username'
OWNERNAME_TRANSFORM = 'corehq.apps.export.transforms.owner_id_to_display'
CASENAME_TRANSFORM = 'corehq.apps.export.transforms.case_id_to_case_name'
class AbstractProperty(object):
def __get__(self, instance, owner):
raise NotImplementedError()
class CustomExportHelper(object):
ExportSchemaClass = AbstractProperty()
ExportReport = AbstractProperty()
export_title = AbstractProperty()
allow_deid = False
allow_repeats = True
subclasses_map = {} # filled in below
export_type = 'form'
@property
def default_order(self):
return {}
@classmethod
def make(cls, request, export_type, domain=None, export_id=None):
export_type = export_type or request.GET.get('request_type', 'form')
return cls.subclasses_map[export_type](request, domain, export_id=export_id)
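    # Sketch: make(request, 'form', domain, export_id) returns a
    # FormCustomExportHelper, while 'case' yields a CaseCustomExportHelper
    # (see subclasses_map at the bottom of this module).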
def update_custom_params(self):
if len(self.custom_export.tables) > 0:
if self.export_stock:
self.custom_export.tables[0].columns.append(
StockExportColumn(domain=self.domain, index='_id')
)
def format_config_for_javascript(self, table_configuration):
return table_configuration
def has_stock_column(self):
return any(
col.doc_type == 'StockExportColumn'
for col in self.custom_export.tables[0].columns
)
class DEID(object):
options = (
('', ''),
(_('Sensitive ID'), 'couchexport.deid.deid_ID'),
(_('Sensitive Date'), 'couchexport.deid.deid_date'),
)
json_options = [{'label': label, 'value': value}
for label, value in options]
def __init__(self, request, domain, export_id=None):
self.request = request
self.domain = domain
self.presave = False
self.transform_dates = False
self.creating_new_export = not bool(export_id)
if export_id:
self.custom_export = self.ExportSchemaClass.get(export_id)
# also update the schema to include potential new stuff
self.custom_export.update_schema()
# enable configuring saved exports from this page
saved_group = HQGroupExportConfiguration.get_for_domain(self.domain)
self.presave = export_id in saved_group.custom_export_ids
self.export_stock = self.has_stock_column()
assert(self.custom_export.doc_type == 'SavedExportSchema')
assert(self.custom_export.type == self.export_type)
assert(self.custom_export.index[0] == domain)
else:
self.custom_export = self.ExportSchemaClass(type=self.export_type)
self.export_stock = False
@property
@memoized
def post_data(self):
return json.loads(self.request.raw_post_data)
def update_custom_export(self):
"""
Updates custom_export object from the request
and saves to the db
"""
post_data = self.post_data
custom_export_json = post_data['custom_export']
SAFE_KEYS = ('default_format', 'is_safe', 'name', 'schema_id', 'transform_dates')
for key in SAFE_KEYS:
self.custom_export[key] = custom_export_json[key]
# update the custom export index (to stay in sync)
schema_id = self.custom_export.schema_id
schema = ExportSchema.get(schema_id)
self.custom_export.index = schema.index
self.presave = post_data['presave']
self.export_stock = post_data['export_stock']
self.custom_export.tables = [
ExportTable.wrap(table)
for table in custom_export_json['tables']
]
table_dict = dict((t.index, t) for t in self.custom_export.tables)
for table in self.custom_export.tables:
if table.index in table_dict:
table_dict[table.index].columns = table.columns
else:
self.custom_export.tables.append(
ExportTable(
index=table.index,
display=self.custom_export.name,
columns=table.columns
)
)
self.update_custom_params()
self.custom_export.save()
if self.presave:
HQGroupExportConfiguration.add_custom_export(self.domain, self.custom_export.get_id)
else:
HQGroupExportConfiguration.remove_custom_export(self.domain, self.custom_export.get_id)
def get_context(self):
table_configuration = self.format_config_for_javascript(self.custom_export.table_configuration)
return {
'custom_export': self.custom_export,
'default_order': self.default_order,
'deid_options': self.DEID.json_options,
'presave': self.presave,
'export_stock': self.export_stock,
'DeidExportReport_name': DeidExportReport.name,
'table_configuration': table_configuration,
'domain': self.domain,
'commtrack_domain': Domain.get_by_name(self.domain).commtrack_enabled,
'helper': {
'back_url': self.ExportReport.get_url(domain=self.domain),
'export_title': self.export_title,
'slug': self.ExportReport.slug,
'allow_deid': self.allow_deid,
'allow_repeats': self.allow_repeats
}
}
class FormCustomExportHelper(CustomExportHelper):
ExportSchemaClass = FormExportSchema
ExportReport = export.ExcelExportReport
allow_deid = True
allow_repeats = True
default_questions = ["form.case.@case_id", "form.meta.timeEnd", "_id", "id", "form.meta.username"]
questions_to_show = default_questions + ["form.meta.timeStart", "received_on"]
@property
def export_title(self):
return _('Export Submissions to Excel')
def __init__(self, request, domain, export_id=None):
super(FormCustomExportHelper, self).__init__(request, domain, export_id)
if not self.custom_export.app_id:
self.custom_export.app_id = request.GET.get('app_id')
def update_custom_params(self):
p = self.post_data['custom_export']
e = self.custom_export
e.include_errors = p['include_errors']
e.app_id = p['app_id']
@property
@memoized
def default_order(self):
return self.custom_export.get_default_order()
def update_table_conf_with_questions(self, table_conf):
column_conf = table_conf[0].get("column_configuration", [])
current_questions = set(self.custom_export.question_order)
remaining_questions = current_questions.copy()
def is_special_type(q):
return any([q.startswith('form.#'), q.startswith('form.@'), q.startswith('form.case.'),
q.startswith('form.meta.'), q.startswith('form.subcase_')])
def generate_additional_columns():
ret = []
case_name_col = CustomColumn(slug='case_name', index='form.case.@case_id', display='info.case_name',
transform=CASENAME_TRANSFORM, show=True, selected=True)
matches = filter(case_name_col.match, column_conf)
if matches:
for match in matches:
case_name_col.format_for_javascript(match)
elif filter(lambda col: col["index"] == case_name_col.index, column_conf):
ret.append(case_name_col.default_column())
return ret
for col in column_conf:
question = col["index"]
if question in remaining_questions:
remaining_questions.discard(question)
col["show"] = True
if question.startswith("form.") and not is_special_type(question) and question not in current_questions:
col["tag"] = "deleted"
col["show"] = False
if question in self.questions_to_show:
col["show"] = True
if self.creating_new_export and (question in self.default_questions or question in current_questions):
col["selected"] = True
column_conf.extend(generate_additional_columns())
column_conf.extend([
ExportColumn(
index=q,
display='',
show=True,
).to_config_format(selected=self.creating_new_export)
for q in remaining_questions
])
# show all questions in repeat groups by default
for conf in table_conf:
if conf["index"].startswith('#.form.'):
for col in conf.get("column_configuration", []):
col["show"] = True
table_conf[0]["column_configuration"] = column_conf
return table_conf
def get_context(self):
ctxt = super(FormCustomExportHelper, self).get_context()
self.update_table_conf_with_questions(ctxt["table_configuration"])
return ctxt
class CustomColumn(object):
def __init__(self, slug, index, display, transform, is_sensitive=False, tag=None, show=False, selected=False):
self.slug = slug
self.index = index
self.display = display
self.transform = transform
self.is_sensitive = is_sensitive
self.tag = tag
self.show = show
self.selected = selected
def match(self, col):
return col['index'] == self.index and col['transform'] == self.transform
def format_for_javascript(self, col):
# this is js --> js conversion so the name is pretty bad
# couch --> javascript UI code
col['special'] = self.slug
def default_column(self):
# this is kinda hacky - mirrors ExportColumn.to_config_format to add custom columns
# to the existing export UI
return {
'index': self.index,
'selected': self.selected,
'display': self.display,
'transform': self.transform,
"is_sensitive": self.is_sensitive,
'tag': self.tag,
'special': self.slug,
'show': self.show,
}
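# Illustrative note: a CustomColumn binds a raw export index to a display name
# plus a server-side transform, mirroring the instances built in
# CaseCustomExportHelper.format_config_for_javascript below, e.g.
#
#   CustomColumn(slug='owner_name', index='owner_id',
#                display='info.owner_name', transform=OWNERNAME_TRANSFORM)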
class CaseCustomExportHelper(CustomExportHelper):
ExportSchemaClass = CaseExportSchema
ExportReport = export.CaseExportReport
export_type = 'case'
default_properties = ["_id", "closed", "closed_on", "modified_on", "opened_on", "info.owner_name", "id"]
default_transformed_properties = ["info.closed_by_username", "info.last_modified_by_username",
"info.opened_by_username", "info.owner_name"]
meta_properties = ["_id", "closed", "closed_by", "closed_on", "domain", "computed_modified_on_",
"server_modified_on", "modified_on", "opened_by", "opened_on", "owner_id",
"user_id", "type", "version", "external_id"]
server_properties = ["_rev", "doc_type", "-deletion_id", "initial_processing_complete"]
row_properties = ["id"]
@property
def export_title(self):
return _('Export Cases, Referrals, and Users')
def format_config_for_javascript(self, table_configuration):
custom_columns = [
CustomColumn(slug='last_modified_by_username', index='user_id',
display='info.last_modified_by_username', transform=USERNAME_TRANSFORM),
CustomColumn(slug='opened_by_username', index='opened_by',
display='info.opened_by_username', transform=USERNAME_TRANSFORM),
CustomColumn(slug='closed_by_username', index='closed_by',
display='info.closed_by_username', transform=USERNAME_TRANSFORM),
CustomColumn(slug='owner_name', index='owner_id', display='info.owner_name',
transform=OWNERNAME_TRANSFORM),
]
main_table_columns = table_configuration[0]['column_configuration']
for custom in custom_columns:
matches = filter(custom.match, main_table_columns)
if not matches:
main_table_columns.append(custom.default_column())
else:
for match in matches:
custom.format_for_javascript(match)
return table_configuration
def update_table_conf(self, table_conf):
        column_conf = table_conf[0].get("column_configuration", [])
current_properties = set(self.custom_export.case_properties)
remaining_properties = current_properties.copy()
def is_special_type(p):
return any([p in self.meta_properties, p in self.server_properties, p in self.row_properties])
for col in column_conf:
prop = col["index"]
display = col.get('display') or prop
if prop in remaining_properties:
remaining_properties.discard(prop)
col["show"] = True
if not is_special_type(prop) and prop not in current_properties:
col["tag"] = "deleted"
col["show"] = False
if prop in self.default_properties + list(current_properties) or \
display in self.default_transformed_properties:
col["show"] = True
if self.creating_new_export:
col["selected"] = True
column_conf.extend([
ExportColumn(
index=prop,
display='',
show=True,
).to_config_format(selected=self.creating_new_export)
for prop in filter(lambda prop: not prop.startswith("parent/"), remaining_properties)
])
table_conf[0]["column_configuration"] = column_conf
return table_conf
def get_context(self):
ctxt = super(CaseCustomExportHelper, self).get_context()
self.update_table_conf(ctxt["table_configuration"])
return ctxt
CustomExportHelper.subclasses_map.update({
'form': FormCustomExportHelper,
'case': CaseCustomExportHelper,
})
| bsd-3-clause | 1,592,665,312,018,443,300 | 37.539474 | 116 | 0.601571 | false |
Oslandia/vizitown_plugin | cyclone/web.py | 1 | 86048 | # coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The cyclone web framework looks a bit like web.py (http://webpy.org/) or
Google's webapp (http://code.google.com/appengine/docs/python/tools/webapp/),
but with additional tools and optimizations to take advantage of the
non-blocking web server and tools.
Here is the canonical "Hello, world" example app::
import cyclone.web
from twisted.internet import reactor
    class MainHandler(cyclone.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
        application = cyclone.web.Application([
(r"/", MainHandler),
])
reactor.listenTCP(8888, application)
reactor.run()
See the cyclone walkthrough on http://cyclone.io for more details and a good
getting started guide.
Thread-safety notes
-------------------
In general, methods on RequestHandler and elsewhere in cyclone are not
thread-safe. In particular, methods such as write(), finish(), and
flush() must only be called from the main thread. For more information on
using threads, please check the twisted documentation:
http://twistedmatrix.com/documents/current/core/howto/threading.html
"""
from __future__ import absolute_import, division, with_statement
import Cookie
import base64
import binascii
import calendar
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import httplib
import itertools
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import traceback
import types
import urllib
import urlparse
import uuid
import cyclone
from cyclone import escape
from cyclone import httpserver
from cyclone import locale
from cyclone import template
from cyclone.escape import utf8, _unicode
from cyclone.util import ObjectDict
from cyclone.util import bytes_type
from cyclone.util import import_object
from cyclone.util import unicode_type
from cStringIO import StringIO as BytesIO # python 2
from twisted.python import failure
from twisted.python import log
from twisted.internet import defer
from twisted.internet import protocol
from twisted.internet import reactor
class RequestHandler(object):
"""Subclass this class and define get() or post() to make a handler.
If you want to support more methods than the standard GET/HEAD/POST, you
should override the class variable SUPPORTED_METHODS in your
RequestHandler class.
If you want lists to be serialized when calling self.write() set
serialize_lists to True.
This may have some security implications if you are not protecting against
XSRF with other means (such as a XSRF token).
More details on this vulnerability here:
http://haacked.com/archive/2008/11/20/anatomy-of-a-subtle-json-vulnerability.aspx
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
serialize_lists = False
no_keep_alive = False
xsrf_cookie_name = "_xsrf"
_template_loaders = {} # {path: template.BaseLoader}
_template_loader_lock = threading.Lock()
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_modules` to avoid
# possible conflicts.
self.ui["_modules"] = ObjectDict((n, self._ui_module(n, m)) for n, m in
application.ui_modules.items())
self.ui["modules"] = self.ui["_modules"]
self.clear()
self.request.connection.no_keep_alive = self.no_keep_alive
self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self, *args, **kwargs):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
pass
def clear(self):
"""Resets all headers and content for this response."""
# The performance cost of cyclone.httputil.HTTPHeaders is significant
# (slowing down a benchmark with a trivial handler by more than 10%),
# and its case-normalization is not generally necessary for
# headers we generate on the server side, so use a plain dict
# and list instead.
self._headers = {
"Server": "cyclone/%s" % cyclone.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": datetime.datetime.utcnow().strftime(
"%a, %d %b %Y %H:%M:%S GMT"),
}
self._list_headers = []
self.set_default_headers()
if not self.request.supports_http_1_1():
if self.request.headers.get("Connection") == "Keep-Alive":
self.set_header("Connection", "Keep-Alive")
self._write_buffer = []
self._status_code = 200
self._reason = httplib.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If `reason` is ``None``,
it must be present in `httplib.responses`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from `httplib.responses`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httplib.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d", status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._list_headers.append((name, self._convert_header_value(value)))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
def _convert_header_value(self, value):
if isinstance(value, bytes_type):
pass
elif isinstance(value, unicode_type):
value = value.encode("utf-8")
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
t = calendar.timegm(value.utctimetuple())
return email.utils.formatdate(t, localtime=False, usegmt=True)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request. Also cap length to
# prevent obviously erroneous values.
if len(value) > 4000 or re.search(r"[\x00-\x1f]", value):
raise ValueError("Unsafe header value %r", value)
return value
_ARG_DEFAULT = []
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we throw an HTTP 400 exception if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
args = self.get_arguments(name, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise HTTPError(400, "Missing argument " + name)
return default
return args[-1]
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
values = []
for v in self.request.arguments.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = re.sub(r"[\x00-\x08\x0e-\x1f]", " ", v)
if strip:
v = v.strip()
values.append(v)
return values
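    # Sketch: for a request to /search?q=foo&q=bar
    #
    #   self.get_argument("q")              # -> u'bar'  (last value wins)
    #   self.get_arguments("q")             # -> [u'foo', u'bar']
    #   self.get_argument("missing", None)  # -> None
    #   self.get_argument("missing")        # raises HTTPError(400)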
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both get_argument() and for
values extracted from the url and passed to get()/post()/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
return _unicode(value)
@property
def cookies(self):
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel directly.
See http://docs.python.org/library/cookie.html#morsel-objects
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
timestamp = calendar.timegm(expires.utctimetuple())
morsel["expires"] = email.utils.formatdate(
timestamp, localtime=False, usegmt=True)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
morsel[k] = v
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name."""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self):
"""Deletes all the cookies the user sent with this request."""
for name in self.request.cookies.iterkeys():
self.clear_cookie(name)
def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
"""
self.set_cookie(name, self.create_signed_value(name, value),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
"""
self.require_setting("cookie_secret", "secure cookies")
return create_signed_value(self.application.settings["cookie_secret"],
name, value)
def get_secure_cookie(self, name, value=None, max_age_days=31):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days)
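    # Sketch of typical usage (requires the "cookie_secret" application
    # setting; the cookie name is illustrative):
    #
    #   self.set_secure_cookie("session_id", "abc123")
    #   ...
    #   sid = self.get_secure_cookie("session_id")
    #   # -> 'abc123' as a byte string, or None if missing/expired/tampered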
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, types.IntType) and 300 <= status <= 399
self.set_status(status)
# Remove whitespace
url = re.sub(r"[\x00-\x20]+", "", utf8(url))
self.set_header("Location", urlparse.urljoin(utf8(self.request.uri),
url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be application/json.
(if you want to send JSON as a different Content-Type, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2008/11/20/\
anatomy-of-a-subtle-json-vulnerability.aspx
"""
if self._finished:
raise RuntimeError("Cannot write() after finish(). May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if isinstance(chunk, types.DictType) or \
(self.serialize_lists and isinstance(chunk, types.ListType)):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
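    # Sketch: self.write({"status": "ok"}) JSON-encodes the dict and switches
    # Content-Type to application/json, while self.write("<p>hi</p>") simply
    # appends UTF-8 bytes to the buffer; nothing reaches the network until
    # flush() or finish() is called.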
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes_type)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes_type)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
if js_files:
# Maintain order of JavaScript files given by modules
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
js = ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
sloc = html.rindex('</body>')
html = html[:sloc] + utf8(js) + '\n' + html[sloc:]
if js_embed:
js = '<script type="text/javascript">\n//<![CDATA[\n' + \
'\n'.join(js_embed) + '\n//]]>\n</script>'
sloc = html.rindex('</body>')
html = html[:sloc] + js + '\n' + html[sloc:]
if css_files:
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
hloc = html.index('</head>')
html = html[:hloc] + utf8(css) + '\n' + html[hloc:]
if css_embed:
css = '<style type="text/css">\n' + '\n'.join(css_embed) + \
'\n</style>'
hloc = html.index('</head>')
html = html[:hloc] + css + '\n' + html[hloc:]
if html_heads:
hloc = html.index('</head>')
html = html[:hloc] + ''.join(html_heads) + '\n' + html[hloc:]
if html_bodies:
hloc = html.index('</body>')
html = html[:hloc] + ''.join(html_bodies) + '\n' + html[hloc:]
self.finish(html)
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated string. To generate and write a template
as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` application setting. If a ``template_loader``
application setting is supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False):
"""Flushes the current output buffer to the network."""
chunk = "".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers, chunk, include_footers)
headers = self._generate_headers()
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
headers = ""
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
if headers:
self.request.write(headers)
return
if headers or chunk:
self.request.write(headers + chunk)
def notifyFinish(self):
"""Returns a deferred, which is fired when the request is terminated
and the connection is closed.
"""
return self.request.notifyFinish()
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice. May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
inm = self.request.headers.get("If-None-Match")
if inm and inm.find(etag) != -1:
self._write_buffer = []
self.set_status(304)
if self._status_code == 304:
assert not self._write_buffer, "Cannot send body with 304"
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
log.msg("Cannot send error response after headers written")
if not self._finished:
self.finish()
return
self.clear()
reason = None
if "exc_info" in kwargs:
e = kwargs["exc_info"][1]
if isinstance(e, HTTPError) and e.reason:
reason = e.reason
elif "exception" in kwargs:
e = kwargs["exception"]
if isinstance(e, HTTPAuthenticationRequired):
args = ",".join(['%s="%s"' % (k, v)
for k, v in e.kwargs.items()])
self.set_header("WWW-Authenticate", "%s %s" %
(e.auth_type, args))
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception, e:
log.msg("Uncaught exception in write_error: " + str(e))
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
For historical reasons, if a method ``get_error_html`` exists,
it will be used instead of the default ``write_error`` implementation.
``get_error_html`` returned a string instead of producing output
normally, and had different semantics for exception handling.
Users of ``get_error_html`` are encouraged to convert their code
to override ``write_error`` instead.
"""
if hasattr(self, 'get_error_html'):
if 'exc_info' in kwargs:
exc_info = kwargs.pop('exc_info')
kwargs['exception'] = exc_info[1]
try:
# Put the traceback into sys.exc_info()
raise exc_info[0], exc_info[1], exc_info[2]
except Exception:
self.finish(self.get_error_html(status_code, **kwargs))
else:
self.finish(self.get_error_html(status_code, **kwargs))
return
if self.settings.get("debug") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" %
{"code": status_code, "message": self._reason})
@property
def locale(self):
"""The local for the current session.
Determined by either get_user_locale, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or get_browser_locale, which uses the Accept-Language
header.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to get_browser_locale().
This method should return a cyclone.locale.Locale object,
most likely obtained via a call like cyclone.locale.get("en")
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from Accept-Language header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
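    # Worked example (illustrative): for the header
    # "Accept-Language: da, en-gb;q=0.8, en;q=0.7" the loop above yields
    # [("da", 1.0), ("en-gb", 0.8), ("en", 0.7)], so the best supported match
    # is requested via locale.get("da", "en-gb", "en") before falling back to
    # the default.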
@property
def current_user(self):
"""The authenticated user for this request.
Determined by either get_current_user, which you can override to
set the user based on, e.g., a cookie. If that method is not
overridden, this method always returns None.
We lazy-load the current user the first time this method is called
and cache the result after that.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie."""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the 'login_url' application setting.
"""
self.require_setting("login_url", "@cyclone.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the 'template_path' application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
"""
if not hasattr(self, "_xsrf_token"):
token = self.get_cookie(self.xsrf_cookie_name)
if not token:
token = binascii.b2a_hex(uuid.uuid4().bytes)
expires_days = 30 if self.current_user else None
self.set_cookie(self.xsrf_cookie_name, token, expires_days=expires_days)
self._xsrf_token = token
return self._xsrf_token
def check_xsrf_cookie(self):
"""Verifies that the '_xsrf' cookie matches the '_xsrf' argument.
To prevent cross-site request forgery, we set an '_xsrf'
cookie and include the same value as a non-cookie
field with all POST requests. If the two do not match, we
reject the form submission as a potential forgery.
The _xsrf value may be set as either a form field named _xsrf
or in a custom HTTP header named X-XSRFToken or X-CSRFToken
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
"X-Requested-With: XMLHTTPRequest" was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
        http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
"""
token = (self.get_argument(self.xsrf_cookie_name, None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
if self.xsrf_token != token:
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML <input/> element to be included with all POST forms.
It defines the _xsrf input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the 'xsrf_cookies' application setting, you must include this
HTML within all of your HTML forms.
See check_xsrf_cookie() above for more information.
"""
return '<input type="hidden" name="' + self.xsrf_cookie_name + \
'" value="' + escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path, include_host=None):
"""Returns a static URL for the given relative static file path.
This method requires you set the 'static_path' setting in your
application (which specifies the root directory of your static
files).
We append ?v=<signature> to the returned URL, which makes our
static file handler set an infinite expiration header on the
returned content. The signature is based on the content of the
file.
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
static_handler_class = self.settings.get(
"static_handler_class", StaticFileHandler)
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host + \
static_handler_class.make_static_url(self.settings, path)
else:
base = static_handler_class.make_static_url(self.settings, path)
return base
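    # Illustrative example: with static_path="/var/www" and a file
    # "css/site.css" below it, self.static_url("css/site.css") returns
    # something like "/static/css/site.css?v=ab12c"; the short hash comes from
    # StaticFileHandler.get_version. The paths shown here are hypothetical.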
def async_callback(self, callback, *args, **kwargs):
"""Obsolete - catches exceptions from the wrapped function.
This function is unnecessary since Tornado 1.1.
"""
if callback is None:
return None
if args or kwargs:
callback = functools.partial(callback, *args, **kwargs)
def wrapper(*args, **kwargs):
try:
return callback(*args, **kwargs)
except Exception, e:
if self._headers_written:
log.msg("Exception after headers written: " + e)
else:
self._handle_request_exception(e)
return wrapper
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
May be overridden to provide custom etag implementations,
or may return None to disable cyclone's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"' + hasher.hexdigest() + '"'
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"): # is True
if not getattr(self, "no_xsrf", False):
self.check_xsrf_cookie()
defer.maybeDeferred(self.prepare).addCallbacks(
self._execute_handler,
lambda f: self._handle_request_exception(f.value),
callbackArgs=(args, kwargs))
except Exception, e:
self._handle_request_exception(e)
def _deferred_handler(self, function, *args, **kwargs):
try:
result = function(*args, **kwargs)
except:
return defer.fail(failure.Failure(
captureVars=defer.Deferred.debug))
else:
if isinstance(result, defer.Deferred):
return result
elif isinstance(result, types.GeneratorType):
                # This may degrade performance a bit, but at least it keeps
                # the server from breaking when someone calls yield without
                # decorating their handler with @inlineCallbacks.
log.msg("[warning] %s.%s() returned a generator. "
"Perhaps it should be decorated with "
"@inlineCallbacks." % (self.__class__.__name__,
self.request.method.lower()))
return self._deferred_handler(defer.inlineCallbacks(function),
*args, **kwargs)
elif isinstance(result, failure.Failure):
return defer.fail(result)
else:
return defer.succeed(result)
def _execute_handler(self, r, args, kwargs):
if not self._finished:
args = [self.decode_argument(arg) for arg in args]
kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.iteritems())
function = getattr(self, self.request.method.lower())
#d = defer.maybeDeferred(function, *args, **kwargs)
d = self._deferred_handler(function, *args, **kwargs)
d.addCallbacks(self._execute_success, self._execute_failure)
self.notifyFinish().addCallback(self.on_connection_close)
def _execute_success(self, ign):
if self._auto_finish and not self._finished:
return self.finish()
def _execute_failure(self, err):
return self._handle_request_exception(err)
def _generate_headers(self):
reason = self._reason
lines = [utf8(self.request.version + " " +
str(self._status_code) +
" " + reason)]
lines.extend([(utf8(n) + ": " + utf8(v)) for n, v in
itertools.chain(self._headers.items(), self._list_headers)])
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
lines.append(utf8("Set-Cookie: " + cookie.OutputString(None)))
return "\r\n".join(lines) + "\r\n\r\n"
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return self.request.method + " " + self.request.uri + " (" + \
self.request.remote_ip + ")"
def _handle_request_exception(self, e):
try:
# These are normally twisted.python.failure.Failure
if isinstance(e.value, (template.TemplateError,
HTTPError, HTTPAuthenticationRequired)):
e = e.value
except:
pass
if isinstance(e, template.TemplateError):
log.msg(str(e))
self.send_error(500, exception=e)
elif isinstance(e, (HTTPError, HTTPAuthenticationRequired)):
if e.log_message and self.settings.get("debug") is True:
log.msg(str(e))
if e.status_code not in httplib.responses:
log.msg("Bad HTTP status code: " + repr(e.status_code))
e.status_code = 500
self.send_error(e.status_code, exception=e)
else:
log.msg("Uncaught exception\n" + str(e))
if self.settings.get("debug"):
log.msg(repr(self.request))
self.send_error(500, exception=e)
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call self.finish()
to terminate the HTTP request. Without this decorator, the request is
automatically finished when the get() or post() method returns. ::
from twisted.internet import reactor
class MyRequestHandler(web.RequestHandler):
@web.asynchronous
def get(self):
self.write("Processing your request...")
reactor.callLater(5, self.do_something)
def do_something(self):
self.finish("done!")
It may be used for Comet and similar push techniques.
http://en.wikipedia.org/wiki/Comet_(programming)
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
return method(self, *args, **kwargs)
return wrapper
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``'/foo/'`` would redirect to ``'/foo'`` with
this decorator. Your request handler mapping should use a regular
expression like ``r'/foo/*'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD", "POST", "PUT", "DELETE"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri = uri + "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to '/foo' would redirect to '/foo/' with this
decorator. Your request handler mapping should use a regular expression
like r'/foo/?' in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD", "POST", "PUT", "DELETE"):
uri = self.request.path + "/"
if self.request.query:
uri = uri + "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
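# Illustrative sketch (not part of the original module): pairing the slash
# decorators above with permissive URL patterns. Handler and route names are
# hypothetical.
#
#     class ListingHandler(RequestHandler):
#         @addslash
#         def get(self):
#             self.write("directory listing")
#
#     application = Application([(r"/files/?", ListingHandler)])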
class Application(protocol.ServerFactory):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
reactor.listenTCP(8888, application)
reactor.run()
The constructor for this class takes in a list of URLSpec objects
or (regexp, request_class) tuples. When we receive requests, we
iterate over the list in order and instantiate an instance of the
first request class whose regexp matches the request path.
Each tuple can contain an optional third element, which should be a
dictionary if it is present. That dictionary is passed as keyword
    arguments to the constructor of the handler. This pattern is used
for the StaticFileHandler below (note that a StaticFileHandler
can be installed automatically with the static_path setting described
below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the add_handlers method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
You can serve static files by sending the static_path setting as a
keyword argument. We will serve those files from the /static/ URI
(this is configurable with the static_url_prefix setting),
and we will serve /favicon.ico and /robots.txt from the same directory.
A custom subclass of StaticFileHandler can be specified with the
static_handler_class setting.
.. attribute:: settings
       Additional keyword arguments passed to the constructor are saved in the
`settings` dictionary, and are often referred to in documentation as
"application settings".
"""
protocol = httpserver.HTTPConnection
def __init__(self, handlers=None, default_host="",
transforms=None, **settings):
if transforms is None:
self.transforms = []
if settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
self.transforms.append(ChunkedTransferEncoding)
else:
self.transforms = transforms
self.handlers = []
self.named_handlers = {}
self.default_host = default_host
self.settings = ObjectDict(settings)
self.ui_modules = {"linkify": _linkify,
"xsrf_form_html": _xsrf_form_html,
"Template": TemplateModule}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if "static_path" in self.settings:
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args["path"] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if handlers:
self.add_handlers(".*$", handlers)
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
if not host_pattern.endswith("$"):
host_pattern += "$"
handlers = []
# The handlers with the wildcard host_pattern are a special
# case - they're added in the constructor but should have lower
# precedence than the more-precise handlers added later.
# If a wildcard handler group exists, it should always be last
# in the list, so insert new groups just before it.
if self.handlers and self.handlers[-1][0].pattern == '.*$':
self.handlers.insert(-1, (re.compile(host_pattern), handlers))
else:
self.handlers.append((re.compile(host_pattern), handlers))
for spec in host_handlers:
if isinstance(spec, types.TupleType):
assert len(spec) in (2, 3)
pattern = spec[0]
handler = spec[1]
if isinstance(handler, types.StringType):
# import the Module and instantiate the class
# Must be a fully qualified name (module.ClassName)
try:
handler = import_object(handler)
except ImportError, e:
reactor.callWhenRunning(log.msg,
"Unable to load handler '%s' for "
"'%s': %s" % (handler, pattern, e))
continue
if len(spec) == 3:
kwargs = spec[2]
else:
kwargs = {}
spec = URLSpec(pattern, handler, kwargs)
handlers.append(spec)
if spec.name:
if spec.name in self.named_handlers:
log.msg("Multiple handlers named %s; "
"replacing previous value" % spec.name)
self.named_handlers[spec.name] = spec
def add_transform(self, transform_class):
"""Adds the given OutputTransform to our transform list."""
self.transforms.append(transform_class)
def _get_host_handlers(self, request):
host = request.host.lower().split(':')[0]
matches = []
for pattern, handlers in self.handlers:
if pattern.match(host):
matches.extend(handlers)
# Look for default host if not behind load balancer (for debugging)
if not matches and "X-Real-Ip" not in request.headers:
for pattern, handlers in self.handlers:
if pattern.match(self.default_host):
matches.extend(handlers)
return matches or None
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, types.ListType):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, types.ListType):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, types.DictType)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def __call__(self, request):
"""Called by HTTPServer to execute the request."""
transforms = [t(request) for t in self.transforms]
handler = None
args = []
kwargs = {}
handlers = self._get_host_handlers(request)
if not handlers:
handler = RedirectHandler(self, request,
url="http://" + self.default_host + "/")
else:
for spec in handlers:
match = spec.regex.match(request.path)
if match:
handler = spec.handler_class(self, request, **spec.kwargs)
if spec.regex.groups:
# None-safe wrapper around url_unescape to handle
# unmatched optional groups correctly
def unquote(s):
if s is None:
return s
return escape.url_unescape(s, encoding=None)
# Pass matched groups to the handler. Since
# match.groups() includes both named and
                        # unnamed groups, we want to use either groups
# or groupdict but not both.
# Note that args are passed as bytes so the handler can
# decide what encoding to use.
if spec.regex.groupindex:
kwargs = dict((str(k), unquote(v))
for (k, v) in match.groupdict().items())
else:
args = [unquote(s) for s in match.groups()]
break
if not handler:
handler = ErrorHandler(self, request, status_code=404)
# In debug mode, re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if self.settings.get("debug"):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
StaticFileHandler.reset()
handler._execute(transforms, *args, **kwargs)
return handler
def reverse_url(self, name, *args):
"""Returns a URL path for handler named `name`
The handler must be added to the application as a named URLSpec.
Args will be substituted for capturing groups in the URLSpec regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
"""
if name in self.named_handlers:
return self.named_handlers[name].reverse(*args)
raise KeyError("%s not found in named urls" % name)
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
'log_function'.
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
request_time = 1000.0 * handler.request.request_time()
log.msg("[" + handler.request.protocol + "] " +
str(handler.get_status()) + " " + handler._request_summary() +
" %.2fms" % request_time)
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses` unless the ``reason`` keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
        determined automatically from ``status_code``, but can be supplied
        explicitly when a non-standard numeric code is used.
"""
def __init__(self, status_code, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get("reason", None)
def __str__(self):
if self.log_message:
return self.log_message % self.args
else:
return self.reason or \
httplib.responses.get(self.status_code, "Unknown")
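# Illustrative usage (not part of the original module): the log message uses
# %s-style placeholders filled from the extra positional arguments; "item_id"
# is a hypothetical variable.
#
#     raise HTTPError(404, "no such item: %s", item_id)
#     raise HTTPError(599, reason="Upstream Timeout")  # non-standard code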
class HTTPAuthenticationRequired(HTTPError):
"""An exception that will turn into an HTTP 401, Authentication Required.
The arguments are used to compose the ``WWW-Authenticate`` header.
See http://en.wikipedia.org/wiki/Basic_access_authentication for details.
:arg string auth_type: Authentication type (``Basic``, ``Digest``, etc)
:arg string realm: Realm (Usually displayed by the browser)
"""
def __init__(self, log_message=None,
auth_type="Basic", realm="Restricted Access", **kwargs):
self.status_code = 401
self.log_message = log_message
self.auth_type = auth_type
self.kwargs = kwargs
self.kwargs["realm"] = realm
class ErrorHandler(RequestHandler):
"""Generates an error response with status_code for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument "url" to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self):
self.redirect(self._url, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
To map a path to this handler for a static data directory /var/www,
you would add a line to your application like::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The local root directory of the content should be passed as the "path"
argument to the handler.
To support aggressive browser caching, if the argument "v" is given
with the path, we set an infinite HTTP expiration header. So, if you
want browsers to cache a file indefinitely, send them to, e.g.,
/static/images/myimage.png?v=xxx. Override ``get_cache_time`` method for
more fine-grained cache control.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {}
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = "%s%s" % (os.path.abspath(path), os.path.sep)
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
self.get(path, include_body=False)
def get(self, path, include_body=True):
path = self.parse_url_path(path)
abspath = os.path.abspath(os.path.join(self.root, path))
# os.path.abspath strips a trailing /
# it needs to be temporarily added back for requests to root/
if not (abspath + os.path.sep).startswith(self.root):
raise HTTPError(403, "%s is not in root static directory", path)
if os.path.isdir(abspath) and self.default_filename is not None:
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect("%s/" % self.request.path)
abspath = os.path.join(abspath, self.default_filename)
if not os.path.exists(abspath):
raise HTTPError(404)
if not os.path.isfile(abspath):
raise HTTPError(403, "%s is not a file", path)
stat_result = os.stat(abspath)
modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
self.set_header("Last-Modified", modified)
mime_type, encoding = mimetypes.guess_type(abspath)
if mime_type:
self.set_header("Content-Type", mime_type)
cache_time = self.get_cache_time(path, modified, mime_type)
if cache_time > 0:
self.set_header("Expires", "%s%s" % (datetime.datetime.utcnow(),
datetime.timedelta(seconds=cache_time)))
self.set_header("Cache-Control", "max-age=%s" % str(cache_time))
self.set_extra_headers(path)
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
if if_since >= modified:
self.set_status(304)
return
with open(abspath, "rb") as file:
data = file.read()
if include_body:
self.write(data)
else:
assert self.request.method == "HEAD"
self.set_header("Content-Length", len(data))
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to trigger aggressive caching or 0
to mark resource as cacheable, only.
By default returns cache expiry of 10 years for resources requested
with "v" argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it is
a class method rather than an instance method).
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
"""
static_url_prefix = settings.get('static_url_prefix', '/static/')
version_hash = cls.get_version(settings, path)
if version_hash:
return "%s%s?v=%s" % (static_url_prefix, path, version_hash)
return "%s%s" % (static_url_prefix, path)
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
This method may be overridden in subclasses (but note that it
is a class method rather than a static method). The default
implementation uses a hash of the file's contents.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
"""
abs_path = os.path.join(settings["static_path"], path)
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
f = open(abs_path, "rb")
hashes[abs_path] = hashlib.md5(f.read()).hexdigest()
f.close()
except Exception:
log.msg("Could not open static file %r" % path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh[:5]
return None
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
class FallbackHandler(RequestHandler):
"""A RequestHandler that wraps another HTTP server callback.
Tornado has this to combine RequestHandlers and WSGI handlers, but it's
    not supported in cyclone and is just here for compatibility purposes.
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
A new transform instance is created for every request. See the
ChunkedTransferEncoding example below if you want to implement a
new Transform.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
"""
CONTENT_TYPES = set([
"text/plain", "text/html", "text/css", "text/xml",
"application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml",
"text/javascript", "application/json", "application/xhtml+xml"])
MIN_LENGTH = 5
def __init__(self, request):
self._gzipping = request.supports_http_1_1() and \
"gzip" in request.headers.get("Accept-Encoding", [])
def transform_first_chunk(self, status_code, headers, chunk, finishing):
if 'Vary' in headers:
headers['Vary'] += ', Accept-Encoding'
else:
headers['Vary'] = 'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = (ctype in self.CONTENT_TYPES) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
(finishing or "Content-Length" not in headers) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
headers["Content-Length"] = str(len(chunk))
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
class ChunkedTransferEncoding(OutputTransform):
"""Applies the chunked transfer encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1
"""
def __init__(self, request):
self._chunking = request.supports_http_1_1()
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# 304 responses have no body (not even a zero-length body), and so
# should not have either Content-Length or Transfer-Encoding headers.
if self._chunking and status_code != 304:
# No need to chunk the output if a Content-Length is specified
if "Content-Length" in headers or "Transfer-Encoding" in headers:
self._chunking = False
else:
headers["Transfer-Encoding"] = "chunked"
chunk = self.transform_chunk(chunk, finishing)
return status_code, headers, chunk
def transform_chunk(self, block, finishing):
if self._chunking:
# Don't write out empty chunks because that means END-OF-STREAM
# with chunked encoding
if block:
block = "%s\r\n%s\r\n" % (utf8("%x" % len(block)), block)
if finishing:
block = "%s0\r\n\r\n" % block
return block
def authenticated(method):
"""Decorate methods with this to require that the user be logged in."""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url = "%s?%s" % (url,
urllib.urlencode(dict(next=next_url)))
return self.redirect(url)
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
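# Illustrative sketch (not part of the original module): protecting a handler
# with the decorator above. It requires the "login_url" application setting;
# handler and cookie names are hypothetical.
#
#     class AccountHandler(RequestHandler):
#         def get_current_user(self):
#             return self.get_secure_cookie("user")
#
#         @authenticated
#         def get(self):
#             self.write("hello, " + str(self.current_user))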
class UIModule(object):
"""A UI re-usable, modular unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.current_user = handler.current_user
self.locale = handler.locale
def render(self, *args, **kwargs):
"""Overridden in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Returns a JavaScript string that will be embedded in the page."""
return None
def javascript_files(self):
"""Returns a list of JavaScript files required by this module."""
return None
def embedded_css(self):
"""Returns a CSS string that will be embedded in the page."""
return None
def css_files(self):
"""Returns a list of CSS files required by this module."""
return None
def html_head(self):
"""Returns a CSS string that will be put in the <head/> element"""
return None
def html_body(self):
"""Returns an HTML string that will be put in the <body/> element"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode, bytes_type)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode, bytes_type)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
class URLSpec(object):
"""Specifies mappings between URLs and handlers."""
def __init__(self, pattern, handler_class, kwargs=None, name=None):
"""Creates a URLSpec.
Parameters:
pattern: Regular expression to be matched. Any groups in the regex
will be passed in to the handler's get/post/etc methods as
arguments.
handler_class: RequestHandler subclass to be invoked.
kwargs (optional): A dictionary of additional arguments to be passed
to the handler's constructor.
name (optional): A name for this handler. Used by
Application.reverse_url.
"""
if not pattern.endswith('$'):
pattern += '$'
self.regex = re.compile(pattern)
assert len(self.regex.groupindex) in (0, self.regex.groups), \
("groups in url regexes must either be all named or all "
"positional: %r" % self.regex.pattern)
self.handler_class = handler_class
self.kwargs = kwargs or {}
self.name = name
self._path, self._group_count = self._find_groups()
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.regex.pattern,
self.handler_class, self.kwargs, self.name)
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return (None, None)
pieces = []
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
pieces.append(fragment)
return (''.join(pieces), self.regex.groups)
def reverse(self, *args):
assert self._path is not None, \
"Cannot reverse url regex " + self.regex.pattern
assert len(args) == self._group_count, "required number of arguments "\
"not found"
if not len(args):
return self._path
converted_args = []
for a in args:
if not isinstance(a, (unicode_type, bytes_type)):
a = str(a)
converted_args.append(escape.url_escape(utf8(a)))
return self._path % tuple(converted_args)
url = URLSpec
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], types.IntType): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value):
timestamp = utf8(str(int(time.time())))
value = base64.b64encode(utf8(value))
signature = _create_signature(secret, name, value, timestamp)
value = "|".join([value, timestamp, signature])
return value
def decode_signed_value(secret, name, value, max_age_days=31):
if not value:
return None
parts = utf8(value).split("|")
if len(parts) != 3:
return None
signature = _create_signature(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
log.msg("Invalid cookie signature %r" % value)
return None
timestamp = int(parts[1])
if timestamp < time.time() - max_age_days * 86400:
log.msg("Expired cookie %r" % value)
return None
if timestamp > time.time() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
log.msg("Cookie timestamp in future; possible tampering %r" % value)
return None
if parts[1].startswith("0"):
log.msg("Tampered cookie %r" % value)
try:
return base64.b64decode(parts[0])
except Exception:
return None
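# Illustrative round trip for the two helpers above; the secret and cookie
# name are hypothetical values.
#
#     signed = create_signed_value("s3cr3t", "session", "42")
#     decode_signed_value("s3cr3t", "session", signed)   # -> "42"
#     decode_signed_value("other", "session", signed)    # -> None (bad sig)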
def _create_signature(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
| gpl-2.0 | 4,279,697,711,647,361,000 | 38.892443 | 88 | 0.589415 | false |
larsyencken/cjktools | cjktools/resources/radkdict.py | 1 | 2575 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# radkdict.py
# cjktools
#
"""
Based on the radkfile, a dictionary mapping each character to its bag of radicals.
"""
import sys
from cjktools import maps
from cjktools.common import get_stream_context, stream_codec
from . import cjkdata
from six import text_type
def _default_stream():
return open(cjkdata.get_resource('radkfile'))
class RadkDict(dict):
"""
Determines which radicals a character contains.
:param istream:
The radkfile to parse.
"""
def __init__(self, istream=None):
"""
"""
with get_stream_context(_default_stream, istream) as istream:
self._parse_radkfile(stream_codec(istream))
def _parse_radkfile(self, line_stream):
"""
Parses the radkfile and populates the current dictionary.
:param line_stream:
A stream yielding the lines in the radkfile to parse.
"""
radical_to_kanji = {}
radical_to_stroke_count = {}
current_radical = None
stroke_count = None
for line in line_stream:
line = line.rstrip()
if line.startswith('#'):
# found a comment line
continue
if line.startswith('$'):
# found a line with a new radical
parts = line.split()
if len(parts) not in (3, 4):
raise Exception('format error parsing radkfile')
dollar, current_radical, stroke_count = parts[:3]
radical_to_stroke_count[current_radical] = int(stroke_count)
continue
# found a line of kanji
kanji = line.strip()
radical_to_kanji.setdefault(current_radical, []).extend(kanji)
self.update(maps.invert_mapping(radical_to_kanji))
maps.map_dict(tuple, self, in_place=True)
self.radical_to_stroke_count = radical_to_stroke_count
self.radical_to_kanji = radical_to_kanji
@classmethod
def get_cached(cls):
""" Returns a memory-cached class instance. """
cached = getattr(cls, '_cached', cls())
cls._cached = cached
return cls._cached
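# Illustrative usage (not part of the original module); the kanji literal is
# only an example character:
#
#     radicals = RadkDict.get_cached()[u'明']
#     # -> a tuple of radicals, e.g. (u'日', u'月')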
def print_radicals(kanji_list):
""" Print out each kanji and the radicals it contains. """
radical_dict = RadkDict()
for kanji in kanji_list:
kanji = text_type(kanji)
radicals = radical_dict[kanji]
print('%s: ' % kanji, ' '.join(sorted(radicals)))
if __name__ == '__main__':
print_radicals(sys.argv[1:])
| bsd-3-clause | -4,731,615,904,252,097,000 | 24.49505 | 76 | 0.582524 | false |
Ambuj-UF/ConCat-1.0 | src/Utils/Bio/MaxEntropy.py | 1 | 10435 | # Copyright 2001 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Maximum Entropy code.
Uses Improved Iterative Scaling.
"""
# TODO Define terminology
from __future__ import print_function
from functools import reduce
import sys
# Add path to Bio
sys.path.append('..')
from Bio._py3k import map
import numpy
class MaxEntropy(object):
"""Holds information for a Maximum Entropy classifier.
Members:
classes List of the possible classes of data.
alphas List of the weights for each feature.
feature_fns List of the feature functions.
"""
def __init__(self):
self.classes = []
self.alphas = []
self.feature_fns = []
def calculate(me, observation):
"""calculate(me, observation) -> list of log probs
Calculate the log of the probability for each class. me is a
MaxEntropy object that has been trained. observation is a vector
representing the observed data. The return value is a list of
unnormalized log probabilities for each class.
"""
scores = []
assert len(me.feature_fns) == len(me.alphas)
for klass in me.classes:
lprob = 0.0
for fn, alpha in zip(me.feature_fns, me.alphas):
lprob += fn(observation, klass) * alpha
scores.append(lprob)
return scores
def classify(me, observation):
"""classify(me, observation) -> class
Classify an observation into a class.
"""
scores = calculate(me, observation)
max_score, klass = scores[0], me.classes[0]
for i in range(1, len(scores)):
if scores[i] > max_score:
max_score, klass = scores[i], me.classes[i]
return klass
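# Illustrative sketch: feature functions take an observation and a class and
# return a number (usually 0 or 1). With a trained MaxEntropy object "me",
# classifying a new observation looks like this; all names are hypothetical.
#
#     def is_red(obs, klass):
#         return 1 if obs[0] == "Red" else 0
#
#     me = train(xs, ys, [is_red])
#     classify(me, ["Red", "SUV", "Imported"])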
def _eval_feature_fn(fn, xs, classes):
"""_eval_feature_fn(fn, xs, classes) -> dict of values
Evaluate a feature function on every instance of the training set
and class. fn is a callback function that takes two parameters: a
training instance and a class. Return a dictionary of (training
set index, class index) -> non-zero value. Values of 0 are not
stored in the dictionary.
"""
values = {}
for i in range(len(xs)):
for j in range(len(classes)):
f = fn(xs[i], classes[j])
if f != 0:
values[(i, j)] = f
return values
def _calc_empirical_expects(xs, ys, classes, features):
"""_calc_empirical_expects(xs, ys, classes, features) -> list of expectations
Calculate the expectation of each function from the data. This is
the constraint for the maximum entropy distribution. Return a
list of expectations, parallel to the list of features.
"""
# E[f_i] = SUM_x,y P(x, y) f(x, y)
# = 1/N f(x, y)
class2index = {}
for index, key in enumerate(classes):
class2index[key] = index
ys_i = [class2index[y] for y in ys]
expect = []
N = len(xs)
for feature in features:
s = 0
for i in range(N):
s += feature.get((i, ys_i[i]), 0)
expect.append(float(s) / N)
return expect
def _calc_model_expects(xs, classes, features, alphas):
"""_calc_model_expects(xs, classes, features, alphas) -> list of expectations.
Calculate the expectation of each feature from the model. This is
not used in maximum entropy training, but provides a good function
for debugging.
"""
# SUM_X P(x) SUM_Y P(Y|X) F(X, Y)
# = 1/N SUM_X SUM_Y P(Y|X) F(X, Y)
p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
expects = []
for feature in features:
sum = 0.0
for (i, j), f in feature.items():
sum += p_yx[i][j] * f
expects.append(sum / len(xs))
return expects
def _calc_p_class_given_x(xs, classes, features, alphas):
"""_calc_p_class_given_x(xs, classes, features, alphas) -> matrix
Calculate P(y|x), where y is the class and x is an instance from
the training set. Return a XSxCLASSES matrix of probabilities.
"""
prob_yx = numpy.zeros((len(xs), len(classes)))
# Calculate log P(y, x).
assert len(features) == len(alphas)
for feature, alpha in zip(features, alphas):
for (x, y), f in feature.items():
prob_yx[x][y] += alpha * f
# Take an exponent to get P(y, x)
prob_yx = numpy.exp(prob_yx)
# Divide out the probability over each class, so we get P(y|x).
for i in range(len(xs)):
z = sum(prob_yx[i])
prob_yx[i] = prob_yx[i] / z
return prob_yx
def _calc_f_sharp(N, nclasses, features):
"""_calc_f_sharp(N, nclasses, features) -> matrix of f sharp values."""
# f#(x, y) = SUM_i feature(x, y)
f_sharp = numpy.zeros((N, nclasses))
for feature in features:
for (i, j), f in feature.items():
f_sharp[i][j] += f
return f_sharp
def _iis_solve_delta(N, feature, f_sharp, empirical, prob_yx,
max_newton_iterations, newton_converge):
# Solve delta using Newton's method for:
# SUM_x P(x) * SUM_c P(c|x) f_i(x, c) e^[delta_i * f#(x, c)] = 0
delta = 0.0
iters = 0
while iters < max_newton_iterations: # iterate for Newton's method
f_newton = df_newton = 0.0 # evaluate the function and derivative
for (i, j), f in feature.items():
prod = prob_yx[i][j] * f * numpy.exp(delta * f_sharp[i][j])
f_newton += prod
df_newton += prod * f_sharp[i][j]
f_newton, df_newton = empirical - f_newton / N, -df_newton / N
ratio = f_newton / df_newton
delta -= ratio
if numpy.fabs(ratio) < newton_converge: # converged
break
iters = iters + 1
else:
raise RuntimeError("Newton's method did not converge")
return delta
def _train_iis(xs, classes, features, f_sharp, alphas, e_empirical,
max_newton_iterations, newton_converge):
"""Do one iteration of hill climbing to find better alphas (PRIVATE)."""
# This is a good function to parallelize.
# Pre-calculate P(y|x)
p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
N = len(xs)
newalphas = alphas[:]
for i in range(len(alphas)):
delta = _iis_solve_delta(N, features[i], f_sharp, e_empirical[i], p_yx,
max_newton_iterations, newton_converge)
newalphas[i] += delta
return newalphas
def train(training_set, results, feature_fns, update_fn=None,
max_iis_iterations=10000, iis_converge=1.0e-5,
max_newton_iterations=100, newton_converge=1.0e-10):
"""Train a maximum entropy classifier, returns MaxEntropy object.
Train a maximum entropy classifier on a training set.
training_set is a list of observations. results is a list of the
class assignments for each observation. feature_fns is a list of
the features. These are callback functions that take an
observation and class and return a 1 or 0. update_fn is a
callback function that is called at each training iteration. It is
passed a MaxEntropy object that encapsulates the current state of
the training.
The maximum number of iterations and the convergence criterion for IIS
are given by max_iis_iterations and iis_converge, respectively, while
max_newton_iterations and newton_converge are the maximum number
of iterations and the convergence criterion for Newton's method.
"""
if not training_set:
raise ValueError("No data in the training set.")
if len(training_set) != len(results):
raise ValueError("training_set and results should be parallel lists.")
# Rename variables for convenience.
xs, ys = training_set, results
# Get a list of all the classes that need to be trained.
classes = sorted(set(results))
# Cache values for all features.
features = [_eval_feature_fn(fn, training_set, classes)
for fn in feature_fns]
# Cache values for f#.
f_sharp = _calc_f_sharp(len(training_set), len(classes), features)
# Pre-calculate the empirical expectations of the features.
e_empirical = _calc_empirical_expects(xs, ys, classes, features)
# Now train the alpha parameters to weigh each feature.
alphas = [0.0] * len(features)
iters = 0
while iters < max_iis_iterations:
nalphas = _train_iis(xs, classes, features, f_sharp,
alphas, e_empirical,
max_newton_iterations, newton_converge)
diff = map(lambda x, y: numpy.fabs(x - y), alphas, nalphas)
diff = reduce(lambda x, y: x + y, diff, 0)
alphas = nalphas
me = MaxEntropy()
me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
if update_fn is not None:
update_fn(me)
if diff < iis_converge: # converged
break
else:
raise RuntimeError("IIS did not converge")
return me
if __name__ == "__main__":
# Car data from example Naive Bayes Classifier example by Eric Meisner November 22, 2003
# http://www.inf.u-szeged.hu/~ormandi/teaching/mi2/02-naiveBayes-example.pdf
xcar = [
['Red', 'Sports', 'Domestic'],
['Red', 'Sports', 'Domestic'],
['Red', 'Sports', 'Domestic'],
['Yellow', 'Sports', 'Domestic'],
['Yellow', 'Sports', 'Imported'],
['Yellow', 'SUV', 'Imported'],
['Yellow', 'SUV', 'Imported'],
['Yellow', 'SUV', 'Domestic'],
['Red', 'SUV', 'Imported'],
['Red', 'Sports', 'Imported']
]
ycar = [
'Yes',
'No',
'Yes',
'No',
'Yes',
'No',
'Yes',
'No',
'No',
'Yes'
]
# Requires some rules or features
def udf1(ts, cl):
if ts[0] == 'Red':
return 0
else:
return 1
def udf2(ts, cl):
if ts[1] == 'Sports':
return 0
else:
return 1
def udf3(ts, cl):
if ts[2] == 'Domestic':
return 0
else:
return 1
user_functions = [udf1, udf2, udf3] # must be an iterable type
xe = train(xcar, ycar, user_functions)
for xv, yv in zip(xcar, ycar):
xc = classify(xe, xv)
print('Pred: %s gives %s y is %s' % (xv, xc, yv))
| gpl-2.0 | -8,712,417,844,903,718,000 | 30.814024 | 92 | 0.601725 | false |
jtoppins/beaker | IntegrationTests/src/bkr/inttest/server/selenium/test_job_export_xml.py | 1 | 2025 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from bkr.inttest.server.selenium import WebDriverTestCase
from bkr.inttest import data_setup, get_server_base
from turbogears.database import session
from bkr.server.model import Job
import requests
import lxml.etree
from StringIO import StringIO
class JobExportXML(WebDriverTestCase):
maxDiff = None
def setUp(self):
with session.begin():
self.job_to_export = data_setup.create_completed_job()
self.browser = self.get_browser()
def test_export_xml(self):
b = self.browser
# Make sure the Export button is present in the jobs grid. We can't
# actually click it because it triggers a download, which WebDriver
# can't handle.
b.get(get_server_base() + 'jobs/')
b.find_element_by_name('simplesearch').send_keys(unicode(self.job_to_export.id))
b.find_element_by_name('jobsearch_simple').submit()
b.find_element_by_xpath(
'//tr[normalize-space(string(./td[1]))="%s"]'
'//a[text()="Export"]'
% self.job_to_export.t_id)
# Fetch the exported XML directly.
response = requests.get(get_server_base() +
'to_xml?taskid=%s&pretty=False' % self.job_to_export.t_id)
actual = response.content
with session.begin():
            # Expire the job, otherwise the exported job XML (read from the
            # Python instance) will have a duration attribute while the export
            # from the view will not, since our database stores only seconds.
session.expire_all()
job = Job.by_id(self.job_to_export.id)
expected = lxml.etree.tostring(job.to_xml(), pretty_print=True, encoding='utf8')
self.assertMultiLineEqual(expected, actual)
| gpl-2.0 | 621,352,936,695,160,400 | 42.085106 | 92 | 0.65284 | false |
samueljackson92/NDImage | ndimage/gui/table_model.py | 1 | 1313 | from PyQt4 import QtCore
class DataFrameTableModel(QtCore.QAbstractTableModel):
def __init__(self, data=None, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent=parent)
self.set_data(data)
def rowCount(self, parent):
return self._data.shape[0] if self._data is not None else 0
def columnCount(self, parent):
return self._data.shape[1] if self._data is not None else 0
def data(self, index, role):
if not index.isValid():
return QtCore.QVariant()
elif role != QtCore.Qt.DisplayRole:
return QtCore.QVariant()
value = self._data.iloc[index.row()][index.column()]
return QtCore.QVariant(str(value))
def get_data(self):
return self._data
def set_data(self, data):
self.beginResetModel()
self._data = data
self.endResetModel()
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
if role == QtCore.Qt.DisplayRole and orientation == QtCore.Qt.Horizontal:
return str(self._data.columns[section])
if role == QtCore.Qt.DisplayRole and orientation == QtCore.Qt.Vertical:
return str(self._data.index[section])
return QtCore.QAbstractTableModel.headerData(self, section, orientation, role)
| mit | -1,186,761,553,999,052,300 | 34.486486 | 86 | 0.648134 | false |
homeworkprod/byceps | tests/services/user_avatar/test_models_image_path.py | 1 | 1182 | """
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from pathlib import Path
from uuid import UUID
import pytest
from byceps.util.image.models import ImageType
from testfixtures.user import create_user
from testfixtures.user_avatar import create_avatar
from tests.helpers import app_context
@pytest.mark.parametrize('avatar_images_path, avatar_id, image_type, expected', [
(
Path('/var/data/avatars'),
UUID('2e17cb15-d763-4f93-882a-371296a3c63f'),
ImageType.jpeg,
Path('/var/data/avatars/2e17cb15-d763-4f93-882a-371296a3c63f.jpeg'),
),
(
Path('/home/byceps/data/global/users/avatars'),
UUID('f0266761-c37e-4519-8cb8-5812d2bfe595'),
ImageType.png,
Path('/home/byceps/data/global/users/avatars/f0266761-c37e-4519-8cb8-5812d2bfe595.png'),
),
])
def test_path(avatar_images_path, avatar_id, image_type, expected):
user = create_user()
avatar = create_avatar(user.id, id=avatar_id, image_type=image_type)
with app_context() as app:
app.config['PATH_USER_AVATAR_IMAGES'] = avatar_images_path
assert avatar.path == expected
| bsd-3-clause | -2,107,900,215,533,090,800 | 28.55 | 96 | 0.694585 | false |
stdlib-js/stdlib | lib/node_modules/@stdlib/math/base/special/maxabs/benchmark/python/benchmark.py | 1 | 2209 | #!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark maxabs."""
from __future__ import print_function
import timeit
NAME = "maxabs"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from math import fabs; from random import random;"
stmt = "y = max(fabs(1000.0*random() - 500.0), fabs(1000.0*random() - 500.0))"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in range(REPEATS):
print("# python::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(REPEATS, REPEATS)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| apache-2.0 | -5,144,522,500,454,437,000 | 21.773196 | 82 | 0.626981 | false |
henry0312/LightGBM | python-package/lightgbm/plotting.py | 1 | 25113 | # coding: utf-8
"""Plotting library."""
from copy import deepcopy
from io import BytesIO
import numpy as np
from .basic import Booster, _log_warning
from .compat import GRAPHVIZ_INSTALLED, MATPLOTLIB_INSTALLED
from .sklearn import LGBMModel
def _check_not_tuple_of_2_elements(obj, obj_name='obj'):
"""Check object is not tuple or does not have 2 elements."""
if not isinstance(obj, tuple) or len(obj) != 2:
raise TypeError(f"{obj_name} must be a tuple of 2 elements.")
def _float2str(value, precision=None):
return (f"{value:.{precision}f}"
if precision is not None and not isinstance(value, str)
else str(value))
def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='Feature importance', ylabel='Features',
importance_type='split', max_num_features=None,
ignore_zero=True, figsize=None, dpi=None, grid=True,
precision=3, **kwargs):
"""Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance which feature importance should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=3)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib and restart your session to plot importance.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
importance = booster.feature_importance(importance_type=importance_type)
feature_name = booster.feature_name()
if not len(importance):
raise ValueError("Booster's feature_importance is empty.")
tuples = sorted(zip(feature_name, importance), key=lambda x: x[1])
if ignore_zero:
tuples = [x for x in tuples if x[1] > 0]
if max_num_features is not None and max_num_features > 0:
tuples = tuples[-max_num_features:]
labels, values = zip(*tuples)
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
for x, y in zip(values, ylocs):
ax.text(x + 1, y,
_float2str(x, precision) if importance_type == 'gain' else x,
va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
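# Illustrative usage sketch (not part of the library source): how plot_importance()
# above is typically called. `params`, `train_data` and `bst` are placeholder names
# assumed to exist in the caller's code, not defined here.
#
#     import lightgbm as lgb
#     bst = lgb.train(params, train_data)
#     ax = lgb.plot_importance(bst, max_num_features=10, importance_type='gain')
#     ax.figure.tight_layout()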
def plot_split_value_histogram(booster, feature, bins=None, ax=None, width_coef=0.8,
xlim=None, ylim=None,
title='Split value histogram for feature with @index/name@ @feature@',
xlabel='Feature split value', ylabel='Count',
figsize=None, dpi=None, grid=True, **kwargs):
"""Plot split value histogram for the specified feature of the model.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance of which feature split value histogram should be plotted.
feature : int or string
The feature name or index the histogram is plotted for.
If int, interpreted as index.
If string, interpreted as name.
bins : int, string or None, optional (default=None)
The maximum number of bins.
If None, the number of bins equals number of unique split values.
If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
width_coef : float, optional (default=0.8)
Coefficient for histogram bar width.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Split value histogram for feature with @index/name@ @feature@")
Axes title.
If None, title is disabled.
@feature@ placeholder can be used, and it will be replaced with the value of ``feature`` parameter.
@index/name@ placeholder can be used,
and it will be replaced with ``index`` word in case of ``int`` type ``feature`` parameter
or ``name`` word in case of ``string`` type ``feature`` parameter.
xlabel : string or None, optional (default="Feature split value")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Count")
Y-axis title label.
If None, title is disabled.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
grid : bool, optional (default=True)
Whether to add a grid for axes.
**kwargs
Other parameters passed to ``ax.bar()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with specified model's feature split value histogram.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
else:
raise ImportError('You must install matplotlib and restart your session to plot split value histogram.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
hist, bins = booster.get_split_value_histogram(feature=feature, bins=bins, xgboost_style=False)
if np.count_nonzero(hist) == 0:
raise ValueError('Cannot plot split value histogram, '
f'because feature {feature} was not used in splitting')
width = width_coef * (bins[1] - bins[0])
centred = (bins[:-1] + bins[1:]) / 2
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.bar(centred, hist, align='center', width=width, **kwargs)
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
range_result = bins[-1] - bins[0]
xlim = (bins[0] - range_result * 0.2, bins[-1] + range_result * 0.2)
ax.set_xlim(xlim)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
ylim = (0, max(hist) * 1.1)
ax.set_ylim(ylim)
if title is not None:
title = title.replace('@feature@', str(feature))
title = title.replace('@index/name@', ('name' if isinstance(feature, str) else 'index'))
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
def plot_metric(booster, metric=None, dataset_names=None,
ax=None, xlim=None, ylim=None,
title='Metric during training',
xlabel='Iterations', ylabel='auto',
figsize=None, dpi=None, grid=True):
"""Plot one metric during training.
Parameters
----------
booster : dict or LGBMModel
Dictionary returned from ``lightgbm.train()`` or LGBMModel instance.
metric : string or None, optional (default=None)
The metric name to plot.
Only one metric supported because different metrics have various scales.
If None, first metric picked from dictionary (according to hashcode).
dataset_names : list of strings or None, optional (default=None)
List of the dataset names which are used to calculate metric to plot.
If None, all datasets are used.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Metric during training")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Iterations")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="auto")
Y-axis title label.
If 'auto', metric name is used.
If None, title is disabled.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
grid : bool, optional (default=True)
Whether to add a grid for axes.
Returns
-------
ax : matplotlib.axes.Axes
The plot with metric's history over the training.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib and restart your session to plot metric.')
if isinstance(booster, LGBMModel):
eval_results = deepcopy(booster.evals_result_)
elif isinstance(booster, dict):
eval_results = deepcopy(booster)
else:
raise TypeError('booster must be dict or LGBMModel.')
num_data = len(eval_results)
if not num_data:
raise ValueError('eval results cannot be empty.')
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
if dataset_names is None:
dataset_names = iter(eval_results.keys())
elif not isinstance(dataset_names, (list, tuple, set)) or not dataset_names:
raise ValueError('dataset_names should be iterable and cannot be empty')
else:
dataset_names = iter(dataset_names)
name = next(dataset_names) # take one as sample
metrics_for_one = eval_results[name]
num_metric = len(metrics_for_one)
if metric is None:
if num_metric > 1:
_log_warning("More than one metric available, picking one to plot.")
metric, results = metrics_for_one.popitem()
else:
if metric not in metrics_for_one:
raise KeyError('No given metric in eval results.')
results = metrics_for_one[metric]
num_iteration, max_result, min_result = len(results), max(results), min(results)
x_ = range(num_iteration)
ax.plot(x_, results, label=name)
for name in dataset_names:
metrics_for_one = eval_results[name]
results = metrics_for_one[metric]
max_result, min_result = max(max(results), max_result), min(min(results), min_result)
ax.plot(x_, results, label=name)
ax.legend(loc='best')
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
xlim = (0, num_iteration)
ax.set_xlim(xlim)
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
range_result = max_result - min_result
ylim = (min_result - range_result * 0.2, max_result + range_result * 0.2)
ax.set_ylim(ylim)
if ylabel == 'auto':
ylabel = metric
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
def _to_graphviz(tree_info, show_info, feature_names, precision=3,
orientation='horizontal', constraints=None, **kwargs):
"""Convert specified tree to graphviz instance.
See:
- https://graphviz.readthedocs.io/en/stable/api.html#digraph
"""
if GRAPHVIZ_INSTALLED:
from graphviz import Digraph
else:
raise ImportError('You must install graphviz and restart your session to plot tree.')
def add(root, total_count, parent=None, decision=None):
"""Recursively add node or edge."""
if 'split_index' in root: # non-leaf
l_dec = 'yes'
r_dec = 'no'
if root['decision_type'] == '<=':
lte_symbol = "≤"
operator = lte_symbol
elif root['decision_type'] == '==':
operator = "="
else:
raise ValueError('Invalid decision type in tree model.')
name = f"split{root['split_index']}"
if feature_names is not None:
label = f"<B>{feature_names[root['split_feature']]}</B> {operator}"
else:
label = f"feature <B>{root['split_feature']}</B> {operator} "
label += f"<B>{_float2str(root['threshold'], precision)}</B>"
for info in ['split_gain', 'internal_value', 'internal_weight', "internal_count", "data_percentage"]:
if info in show_info:
output = info.split('_')[-1]
if info in {'split_gain', 'internal_value', 'internal_weight'}:
label += f"<br/>{_float2str(root[info], precision)} {output}"
elif info == 'internal_count':
label += f"<br/>{output}: {root[info]}"
elif info == "data_percentage":
label += f"<br/>{_float2str(root['internal_count'] / total_count * 100, 2)}% of data"
fillcolor = "white"
style = ""
if constraints:
if constraints[root['split_feature']] == 1:
fillcolor = "#ddffdd" # light green
if constraints[root['split_feature']] == -1:
fillcolor = "#ffdddd" # light red
style = "filled"
label = f"<{label}>"
graph.node(name, label=label, shape="rectangle", style=style, fillcolor=fillcolor)
add(root['left_child'], total_count, name, l_dec)
add(root['right_child'], total_count, name, r_dec)
else: # leaf
name = f"leaf{root['leaf_index']}"
label = f"leaf {root['leaf_index']}: "
label += f"<B>{_float2str(root['leaf_value'], precision)}</B>"
if 'leaf_weight' in show_info:
label += f"<br/>{_float2str(root['leaf_weight'], precision)} weight"
if 'leaf_count' in show_info:
label += f"<br/>count: {root['leaf_count']}"
if "data_percentage" in show_info:
label += f"<br/>{_float2str(root['leaf_count'] / total_count * 100, 2)}% of data"
label = f"<{label}>"
graph.node(name, label=label)
if parent is not None:
graph.edge(parent, name, decision)
graph = Digraph(**kwargs)
rankdir = "LR" if orientation == "horizontal" else "TB"
graph.attr("graph", nodesep="0.05", ranksep="0.3", rankdir=rankdir)
if "internal_count" in tree_info['tree_structure']:
add(tree_info['tree_structure'], tree_info['tree_structure']["internal_count"])
else:
raise Exception("Cannot plot trees with no split")
if constraints:
# "#ddffdd" is light green, "#ffdddd" is light red
legend = """<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">
<TR>
<TD COLSPAN="2"><B>Monotone constraints</B></TD>
</TR>
<TR>
<TD>Increasing</TD>
<TD BGCOLOR="#ddffdd"></TD>
</TR>
<TR>
<TD>Decreasing</TD>
<TD BGCOLOR="#ffdddd"></TD>
</TR>
</TABLE>
>"""
graph.node("legend", label=legend, shape="rectangle", color="white")
return graph
def create_tree_digraph(booster, tree_index=0, show_info=None, precision=3,
orientation='horizontal', **kwargs):
"""Create a digraph representation of specified tree.
Each node in the graph represents a node in the tree.
Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means
"this node splits on the feature named "Column_10", with threshold 875.9".
Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a
leaf node, and the predicted value for records that fall into this node
is 0.422". The number (``2``) is an internal unique identifier and doesn't
have any special meaning.
.. note::
For more information please visit
https://graphviz.readthedocs.io/en/stable/api.html#digraph.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance to be converted.
tree_index : int, optional (default=0)
The index of a target tree to convert.
show_info : list of strings or None, optional (default=None)
What information should be shown in nodes.
- ``'split_gain'`` : gain from adding this split to the model
- ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node
- ``'internal_count'`` : number of records from the training data that fall into this non-leaf node
- ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node
- ``'leaf_count'`` : number of records from the training data that fall into this leaf node
- ``'leaf_weight'`` : total weight (sum of hessian) of all observations that fall into this leaf node
- ``'data_percentage'`` : percentage of training data that fall into this node
precision : int or None, optional (default=3)
Used to restrict the display of floating point values to a certain precision.
orientation : string, optional (default='horizontal')
Orientation of the tree.
Can be 'horizontal' or 'vertical'.
**kwargs
Other parameters passed to ``Digraph`` constructor.
Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters.
Returns
-------
graph : graphviz.Digraph
The digraph representation of specified tree.
"""
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
model = booster.dump_model()
tree_infos = model['tree_info']
if 'feature_names' in model:
feature_names = model['feature_names']
else:
feature_names = None
monotone_constraints = model.get('monotone_constraints', None)
if tree_index < len(tree_infos):
tree_info = tree_infos[tree_index]
else:
raise IndexError('tree_index is out of range.')
if show_info is None:
show_info = []
graph = _to_graphviz(tree_info, show_info, feature_names, precision,
orientation, monotone_constraints, **kwargs)
return graph
def plot_tree(booster, ax=None, tree_index=0, figsize=None, dpi=None,
show_info=None, precision=3, orientation='horizontal', **kwargs):
"""Plot specified tree.
Each node in the graph represents a node in the tree.
Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means
"this node splits on the feature named "Column_10", with threshold 875.9".
Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a
leaf node, and the predicted value for records that fall into this node
is 0.422". The number (``2``) is an internal unique identifier and doesn't
have any special meaning.
.. note::
It is preferable to use ``create_tree_digraph()`` because of its lossless quality
and returned objects can be also rendered and displayed directly inside a Jupyter notebook.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance to be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
tree_index : int, optional (default=0)
The index of a target tree to plot.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
show_info : list of strings or None, optional (default=None)
What information should be shown in nodes.
- ``'split_gain'`` : gain from adding this split to the model
- ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node
- ``'internal_count'`` : number of records from the training data that fall into this non-leaf node
- ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node
- ``'leaf_count'`` : number of records from the training data that fall into this leaf node
- ``'leaf_weight'`` : total weight (sum of hessian) of all observations that fall into this leaf node
- ``'data_percentage'`` : percentage of training data that fall into this node
precision : int or None, optional (default=3)
Used to restrict the display of floating point values to a certain precision.
orientation : string, optional (default='horizontal')
Orientation of the tree.
Can be 'horizontal' or 'vertical'.
**kwargs
Other parameters passed to ``Digraph`` constructor.
Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters.
Returns
-------
ax : matplotlib.axes.Axes
The plot with single tree.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.image as image
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib and restart your session to plot tree.')
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
graph = create_tree_digraph(booster=booster, tree_index=tree_index,
show_info=show_info, precision=precision,
orientation=orientation, **kwargs)
s = BytesIO()
s.write(graph.pipe(format='png'))
s.seek(0)
img = image.imread(s)
ax.imshow(img)
ax.axis('off')
return ax
| mit | -408,649,031,525,272,200 | 39.309791 | 115 | 0.614781 | false |
darknightghost/AutoDeployer | cmd_pipe/SubProc.py | 1 | 3581 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
'''
Copyright 2016,王思远 <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
class SubProc:
def __init__(self, path, argv, encoding = 'utf-8'):
self.path = path
self.argv = argv
self.encoding = encoding
#Get file descriptors
self.stdinFd = sys.stdin.fileno()
self.stdoutFd = sys.stdout.fileno()
self.stderrFd = sys.stderr.fileno()
#Create pipe
self.parentIn, self.childStdout = os.pipe()
self.childStdin, self.parentOut = os.pipe()
pid = os.fork()
if pid == 0:
self.is_child()
else:
self.child_id = pid
self.is_parent()
self.buffer = ""
def is_child(self):
os.close(self.parentIn)
os.close(self.parentOut)
os.dup2(self.childStdin, self.stdinFd)
os.dup2(self.childStdout, self.stdoutFd)
os.dup2(self.childStdout, self.stderrFd)
os.execv(self.path, self.argv)
def is_parent(self):
os.close(self.childStdin)
os.close(self.childStdout)
def read(self):
bs = os.read(self.parentIn, 1024)
ret = self.buffer + bs.decode(encoding = self.encoding,
errors = 'ignore')
self.buffer = ""
return ret
def read_until(self, *strings):
for string in strings:
if string in self.buffer:
index = self.buffer.index(string)
                ret = self.buffer[: index] + string
                self.buffer = self.buffer[index + len(string): ]
return ret, strings.index(string)
str_len = 0
for string in strings:
str_len = max(str_len, len(string))
cmp_str = self.buffer[-str_len :]
while True:
bs = os.read(self.parentIn, 1024)
s = self.buffer + bs.decode(encoding = self.encoding,
errors = 'ignore')
cmp_str += s
self.buffer += s
for string in strings:
if string in cmp_str:
index = self.buffer.index(string)
                    ret = self.buffer[: index] + string
                    self.buffer = self.buffer[index + len(string): ]
return ret, strings.index(string)
cmp_str = cmp_str[-str_len :]
print("log" + cmp_str)
def write(self, string):
os.write(self.parentOut, string.encode(encoding = self.encoding,
errors = 'ignore'))
def read_line(self):
return self.read_until('\n')
def close(self):
os.close(self.parentIn)
os.close(self.parentOut)
os.wait()
def __del__(self):
os.close(self.parentIn)
os.close(self.parentOut)
try:
os.kill(self.child_id, 9)
except Exception:
pass
os.wait()
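# Minimal usage sketch (illustrative only; '/bin/cat' and the strings below are
# placeholder values, not part of this project). Kept as comments so importing
# this module stays side-effect free:
#
#     if __name__ == '__main__':
#         proc = SubProc('/bin/cat', ['cat'])
#         proc.write('hello\n')
#         line, _ = proc.read_line()   # read_line() returns (text, matched_index)
#         print(line)
#         proc.close()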
| gpl-3.0 | -127,845,725,472,308,430 | 29.29661 | 74 | 0.567552 | false |
martinzlocha/mad | portal/forms.py | 1 | 2190 | from django import forms
from django.core.exceptions import ValidationError
from portal.models import Student, Hobby
class SignUpForm(forms.ModelForm):
class Meta:
model = Student
fields = ['name', 'username', 'gender', 'course']
widgets = {
'name': forms.TextInput(attrs={'placeholder': 'Ideally similar to name on FB'}),
'username': forms.TextInput(attrs={'placeholder': 'xy1217'}),
}
def clean_username(self):
username = self.cleaned_data['username']
# ToDo(martinzlocha): Check if the username provided looks valid
return username
class PreferenceForm(forms.ModelForm):
hobbies = forms.ModelMultipleChoiceField(widget=forms.CheckboxSelectMultiple(), required=False,
queryset=Hobby.objects.all())
class Meta:
model = Student
fields = ('party', 'hobbies')
def __init__(self, *args, **kwargs):
super(PreferenceForm, self).__init__(*args, **kwargs)
self.fields['party'].label = "Do you enjoy clubbing/partying/drinking?"
self.fields['hobbies'].label = "What are your hobbies? (Maximum 5 responses)"
def clean_hobbies(self):
hobbies = self.cleaned_data['hobbies']
if len(hobbies) > 5:
raise ValidationError("Maximum of 5 hobbies.")
return hobbies
class PartnerForm(forms.ModelForm):
class Meta:
model = Student
fields = ('partner',)
def __init__(self, *args, **kwargs):
super(PartnerForm, self).__init__(*args, **kwargs)
self.instance = kwargs.pop('instance', None)
self.fields['partner'].help_text = "If you don't have a partner then one will be allocated to you with " \
"similar hobbies."
choice = Student.objects.filter(confirmed=False, child=False).exclude(username__contains=self.instance.username)
self.fields["partner"].queryset = choice
def get_successful_proposal_popup(self):
message = "Proposal has been successfully sent to %s." % self.cleaned_data['partner']
return {'message': message, 'state': 'success'}
| mit | -1,180,396,039,648,989,400 | 34.322581 | 120 | 0.620091 | false |
citiususc/construe | construe/utils/signal_processing/wave_extraction.py | 1 | 8755 | # -*- coding: utf-8 -*-
# pylint: disable-msg=C0103
"""
Created on Thu Oct 17 13:15:52 2013
This module provides the functionality to obtain basic primitive structures,
called "peaks", from a signal fragment and its corresponding simplification
using the Douglas-Peucker algorithm. The process is based on the paper:
"Trahanias: Syntactic pattern recognition of the ECG, 1990".
@author: T. Teijeiro
"""
from .signal_measures import get_peaks
from ..units_helper import (msec2samples as ms2sp, phys2digital as ph2dg,
digital2phys as dg2ph, digital2mm as dg2mm,
samples2mm as sp2mm)
import numpy as np
import math
###############################################################################
# Amplitude and duration thresholds for the waves, extracted from the paper: #
# European Heart Journal: Recommendations for measurement standards in #
# quantitative electrocardiography. (1985) #
###############################################################################
MIN_AMP = ph2dg(0.05)
MIN_DUR = ms2sp(10.)
#Custom threshold, taken as an intuitive reference
MIN_ANGLE = math.pi/4.0
class Wave(object):
"""
This class provides the model of a Peak as is defined in the paper in which
this module is based. We have added an amplitude attribute.
"""
__slots__ = ('pts', 'e', 'amp')
def __init__(self):
#X coordinates for the left bound, peak, and right bound.
self.pts = (0, 0, 0)
#Wave energy
self.e = 0.0
#Wave amplitude
self.amp = 0.0
def __str__(self):
return '{0} - {1} - {2}, e = {3}, amp = {4} mV'.format(self.l,
self.m, self.r, self.e, dg2ph(self.amp))
def __repr__(self):
return str(self)
def __eq__(self, other):
return (type(self) is type(other) and self.e == other.e
and self.amp == other.amp and self.pts == other.pts)
@property
def sign(self):
"""
Obtains whether this Wave is a positive or negative Wave.
"""
return np.sign(self.amp)
@property
def l(self):
"""Returns the left boundary of the wave"""
return self.pts[0]
@property
def r(self):
"""Returns the left boundary of the wave"""
return self.pts[2]
@property
def m(self):
"""Returns the left boundary of the wave"""
return self.pts[1]
@property
def dur(self):
"""Returns the duration of the wave"""
return self.pts[2] - self.pts[0]
def move(self, displacement):
"""
Moves the wave a certain time, by adding the displacement value to
each bound and peak.
"""
self.pts = tuple(p+displacement for p in self.pts)
def extract_waves(signal, points, baseline= None):
"""
Obtains the sequence of *Wave* objects present in a signal fragment, based
on the shape simplification determined by points.
Parameters
----------
signal:
Raw signal fragment.
points:
Indices of the relevant points in the signal, that will be used to
determine the peaks.
Returns
-------
out:
Tuple of *Wave* objects.
"""
if baseline is None or not np.min(signal) <= baseline <= np.max(signal):
baseline = signal[0] - (signal[0]-signal[-1])/2.0
result = []
#Angle between two points
angle = lambda a, b : math.atan(dg2mm(abs(signal[b]-signal[a]))/sp2mm(b-a))
pks = points[get_peaks(signal[points])]
#If there are no peaks, there are no waves.
if len(pks) == 0:
return tuple()
#The limits of the waves will be the baseline level, or an angle decrease.
for i in range(len(pks)):
newpk = Wave()
#The limits of each wave is the next and the prevoius peak.
lb = 0 if i == 0 else pks[i-1]
#Left slope
idx = np.where(points==lb)[0][0]
while (points[idx] < pks[i] and (angle(points[idx], pks[i]) < MIN_ANGLE
or angle(points[idx], points[idx+1]) < MIN_ANGLE)):
idx += 1
#If we stop in the peak, we discard a wave in that peak.
if points[idx] == pks[i]:
continue
lb = points[idx]
#Right slope
rb = points[-1] if i == len(pks)-1 else pks[i+1]
idx = np.where(points==rb)[0][0]
while (points[idx] > pks[i] and (angle(pks[i], points[idx]) < MIN_ANGLE
or angle(points[idx-1], points[idx]) < MIN_ANGLE)):
idx -= 1
if points[idx] == pks[i]:
continue
rb = points[idx]
#Now we have ensured to meet minimum angle requirements. We now check
#duration and amplitude.
newpk.pts = (lb, pks[i], rb)
fref = min if signal[newpk.m] > signal[lb] else max
newpk.amp = signal[newpk.m] - fref(signal[rb], signal[lb])
#We remove peaks not satisfying basic constraints.
if (newpk.dur >= MIN_DUR and abs(newpk.amp) >= MIN_AMP):
result.append(newpk)
#The limits of consecutive waves have to be refined.
_refine_wave_limits(result, signal, baseline)
return tuple(result)
def _refine_wave_limits(waves, signal, baseline):
"""
This auxiliary function checks a sequence of wave objects, join two
consecutive waves if they are very close, and establishing the proper
join point if they overlap.
"""
i = 0
while i < len(waves):
#First we check for overlaps with the precedent wave
if i > 0 and waves[i].l < waves[i-1].r:
#The join point is the point closer to the baseline.
jp = waves[i].l + np.argmin(np.abs(
signal[waves[i].l:waves[i-1].r+1]-baseline))
waves[i].pts = (jp, waves[i].m, waves[i].r)
#And then for overlaps with the next one
if i < len(waves)-1 and waves[i].r > waves[i+1].l:
jp = waves[i+1].l + np.argmin(np.abs(
signal[waves[i+1].l:waves[i].r+1]-baseline))
waves[i].pts = (waves[i].l, waves[i].m, jp)
#Now we recompute amplitude.
fref = min if signal[waves[i].m] > signal[waves[i].l] else max
waves[i].amp = signal[waves[i].m] - fref(signal[waves[i].l],
signal[waves[i].r])
if (abs(waves[i].amp) < MIN_AMP or waves[i].dur < MIN_DUR or
waves[i].l == waves[i].m or waves[i].m == waves[i].r):
waves.pop(i)
else:
waves[i].e = np.sum(np.diff(signal[waves[i].l:waves[i].r+1])**2)
i += 1
#Now we join waves that are very close
for i in range(1, len(waves)):
sep = waves[i].l - waves[i-1].r
if 0 < sep < MIN_DUR:
#We join the waves in the maximum deviation point from the
#baseline.
pk = waves[i-1].r + np.argmax(np.abs(
signal[waves[i-1].r:waves[i].l+1]-baseline))
waves[i-1].pts = (waves[i-1].l, waves[i-1].m, pk)
waves[i].pts = (pk, waves[i].m, waves[i].r)
if __name__ == "__main__":
import matplotlib.pyplot as plt
#Small tests with a real delineated QRS example.
#Example 1: Record 124, beat 0, lead MLII
pts = np.array([ 0, 8, 14, 23, 27, 30, 42])
sig = np.array([837, 841, 854, 874, 893, 910, 924, 931,
935, 925, 902, 874, 840, 821, 821, 842,
880, 929, 982, 1031, 1076, 1122, 1162, 1200,
1229, 1250, 1262, 1263, 1257, 1241, 1218, 1187,
1151, 1109, 1067, 1024, 981, 938, 895, 857,
828, 810, 799])
#Example 2: Record 124, beat 0, lead V4
pts = np.array([ 0, 7, 9, 12, 14, 22])
sig = np.array([ 875., 886., 901., 928., 952., 970., 975., 972.,
955., 921., 868., 811., 758., 725., 717., 733.,
764., 803., 840., 871., 897., 915., 926.])
    #Example 3: Record 113, beat 0
pts = np.array([ 0, 8, 10, 14, 17, 22, 28])
sig = np.array([ 1042., 1046., 1053., 1059., 1066., 1074., 1079.,
1078., 1082., 1080., 1069., 1053., 1031., 1009.,
990., 978., 965., 965., 971., 987., 1011.,
1023., 1032., 1030., 1025., 1027., 1034., 1041.,
1045.])
plt.figure()
plt.plot(sig, '--')
plt.plot(pts, sig[pts], 'bo')
for p in extract_waves(sig, pts):
x = np.array(p.pts)
plt.plot(x, sig[x])
print(str(p))
| agpl-3.0 | 7,635,516,548,468,927,000 | 37.231441 | 79 | 0.531468 | false |
endreman0/Excalibot | excalibot/cogs/voice.py | 1 | 4864 | import asyncio, discord
from discord.ext.commands import guild_only
from .. import db, log
from ..core import command
class VoiceText(metaclass=command.Cog):
async def on_voice_state_update(self, member, before, after):
if before.channel is after.channel: return # Only care about changing channels
with self.bot.session as session:
give_links = tuple() if after.channel is None else session.get(TextVoiceLink, voice_id=after.channel.id).all()
take_links = tuple() if before.channel is None else session.get(TextVoiceLink, voice_id=before.channel.id).all()
if give_links:
give_roles = tuple(member.guild.get_role(link.role_id) for link in give_links)
await member.add_roles(*give_roles)
if take_links:
take_roles = tuple(member.guild.get_role(link.role_id) for link in take_links)
await member.remove_roles(*take_roles)
@guild_only()
@command.group(name='voicetext', aliases=['vt', 'voice'])
async def base_command(self, ctx):
"""Voice-text bridge, linking voice channels to text channels.
Any number of text channels can be connected to any number of voice channels.
For example, you can link all voice channels to a general voice-text channel, and also link each one to one or more text channels for that specific voice channel."""
await ctx.send('Use `{}help {}` for info'.format(ctx.prefix, ctx.invoked_with))
@base_command.command()
async def link(self, ctx, text: discord.TextChannel, *, voice: discord.VoiceChannel):
"""Link an existing text channel to a voice channel."""
with ctx.session:
link = ctx.session.get(TextVoiceLink, text_id=text.id, voice_id=voice.id).one_or_none()
if link is not None:
return await ctx.send('BAKA! Those channels are already linked!')
role = await self._create_role(ctx.guild, text, voice, 'Voice link requested by {}'.format(ctx.author))
link = ctx.session.add(TextVoiceLink(role_id=role.id, text_id=text.id, voice_id=voice.id))
await ctx.send('Linked {} to "{}"', text.mention, voice.name)
link.example_usage = """
`{prefix}voicetext link #voice-1 Voice 1` - link the text channel #voice-1 with the voice channel "Voice 1", so that users in voice get access to the text channel
"""
@base_command.command()
async def make(self, ctx, *, voice: discord.VoiceChannel):
"""Create a text channel and link it to the given voice channel."""
msg = await ctx.send('Creating text channel for {}', voice.name)
text = await ctx.guild.create_text_channel('voice-' + voice.name.lower().replace(' ', '-'), reason='Voice link requested by {}'.format(ctx.author))
with ctx.session:
role = await self._create_role(ctx.guild, text, voice, 'Voice link requested by {}'.format(ctx.author))
link = ctx.session.add(TextVoiceLink(role_id=role.id, text_id=text.id, voice_id=voice.id))
await ctx.send('Created {} and linked it to "{}"', text.mention, voice.name)
make.example_usage = """
        `{prefix}voicetext make Voice 1` - creates a text channel #voice-1 linked to the voice channel "Voice 1", so that users in voice get access to the text channel
"""
@base_command.command()
async def unlink(self, ctx, text: discord.TextChannel, *, voice: discord.VoiceChannel):
"""Unlinks a voice channel and deletes the corresponding role."""
with ctx.session:
link = ctx.session.get(TextVoiceLink, text_id=text.id, voice_id=voice.id).one_or_none()
if link is None:
return await ctx.send('BAKA! Those channels are not linked!')
role_id, text_id, voice_id = link.role_id, link.text_id, link.voice_id
ctx.session.delete(link)
role = ctx.guild.get_role(role_id)
if role is None:
                await ctx.send('Unlinked {} from "{}"; the linked role was already deleted.', text.mention, voice.name)
else:
await self._delete_role(ctx.guild, role, text, voice, 'Voice unlink requested by {}'.format(ctx.author))
await ctx.send('Unlinked {} from "{}" and deleted the "{}" role.', text.mention, voice.name, role.name)
unlink.example_usage = """
`{prefix}voicetext unlink #voice-1 Voice 1` - unlink the text channel #voice-1 from the voice channel "Voice 1"
"""
async def _create_role(self, guild, text, voice, reason):
role = await guild.create_role(name='Voice {}'.format(voice.name), reason=reason)
await text.set_permissions(guild.default_role, read_messages=False, reason=reason)
await text.set_permissions(role, read_messages=True, reason=reason)
return role
async def _delete_role(self, guild, role, text, voice, reason):
await text.set_permissions(guild.default_role, read_messages=None, reason=reason)
await text.set_permissions(role, overwrite=None, reason=reason)
await role.delete(reason=reason)
class TextVoiceLink(db.DatabaseObject):
__tablename__ = 'voicetext_links'
text_id = db.Column(db.Integer, primary_key=True)
voice_id = db.Column(db.Integer, primary_key=True)
role_id = db.Column(db.Integer, unique=True)
| gpl-3.0 | -6,292,609,427,924,506,000 | 52.450549 | 167 | 0.717516 | false |
piotrmaslanka/cyrkus | redaction/plugins/httpifc/__init__.py | 1 | 2636 | # coding=UTF-8
"""
HTTP interface plugin: exposes the node reports collected by cyrkus over a small
HTTP server (HTML overview on GET /, per-node JSON snapshot on POST /<nodename>).
"""
from __future__ import division
from hashlib import sha1
import unicodedata, httplib, urllib, urlparse, json, BaseHTTPServer, time
from satella.threads import BaseThread
from cyrkus.redaction.plugins.httpifc.format import fformat
class HttpServerThread(BaseThread):
class HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
self.request = request
def do_GET(self):
if self.path == '/':
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=UTF-8')
self.end_headers()
self.wfile.write(fformat(self.server.plugin.last_data, self.server.plugin.last_records))
else:
self.send_response(404)
def do_POST(self):
nodename = self.path[1:]
try:
ld = self.server.plugin.last_data[nodename]
lr = self.server.plugin.last_records[nodename]
except KeyError:
self.send_error(404)
return
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(
{
'node': ld,
'secs_ago': int(time.time() - lr)
}
))
def __init__(self, calling_plugin, listening_ifc):
BaseThread.__init__(self)
self.plugin = calling_plugin
self.listen_ifc = tuple(listening_ifc)
def run(self):
httpd = BaseHTTPServer.HTTPServer(self.listen_ifc, HttpServerThread.HTTPRequestHandler)
httpd.plugin = self.plugin
httpd.serve_forever()
class Plugin(object):
def __init__(self, config, plugins):
"""@param config: configuration, as seen in config.json"""
self.config = config
self.plugins = plugins
self.last_data = {} # nodename => data dict
self.last_records = {} # nodename => last transmit time (timestamp)
self.http_server = HttpServerThread(self, config['listen_interface'])
self.http_server.start()
def on_received_report(self, data):
"""Received a report from other node"""
self.last_data[data['nodename']] = data
self.last_records[data['nodename']] = time.time()
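# Assumed configuration shape (inferred from the constructor above; the exact
# nesting inside config.json may differ in a real deployment):
#
#     "listen_interface": ["0.0.0.0", 8080]
#
# The plugin binds an HTTP server on that interface, serves an HTML overview of
# the last report per node on GET / and a JSON snapshot on POST /<nodename>.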
| bsd-3-clause | -2,995,706,234,309,641,700 | 34.146667 | 104 | 0.588771 | false |
pmeas/pmeas-backend | __main__.py | 1 | 10740 | #!/usr/bin/python
GPIO_CAPABLE = False
import time
from functools import partial
import signal
import sys
import pyo
try:
import RPi.GPIO as GPIO
GPIO_CAPABLE = True
except ImportError:
pass
if GPIO_CAPABLE:
import gpiocontrol
import bridge
import configparser
import jackserver
import flanger
import socket
SOCKET_TIMEOUT = 30 #seconds
button_pin = 17
PYO_INIT_SETTINGS = {
'audio':'jack',
'nchnls':1
}
def start_pyo_server():
"""Start the Pyo server
Return the pyo instance of the server
"""
print("Attempting to start the pyo server")
pyo_server = pyo.Server(**PYO_INIT_SETTINGS).boot()
print("Pyo server booted")
pyo_server.start()
print("Pyo server started")
return pyo_server
def stop_pyo_server(pyo_server):
"""Stop the Pyo server
"""
print("Attempting to stop the pyo server")
pyo_server.stop()
print("Pyo server stopped")
def chain_effects( initial_source, config_effects_dict ):
'''
Loop through the effects and assembles their configuration in order according to their keys.
initial_source - the medium by which the audio stream is read (i.e through the input port)
config_effects_dict - the list of effects to enable on top of the audio stream.
'''
vol = 1 #default volume
enabled_effects = [initial_source]
# Make the source of the next effect the previously applied effect.
source = enabled_effects[len(enabled_effects) - 1]
# If the volume was set, change the default value to the requested volume.
if "volume" in config_effects_dict:
vol = config_effects_dict.pop("volume")
enabled_effects.append(pyo.Tone(
source,
freq = 20000,
mul = vol
)
)
# Run through all the effects in our configuration file and apply
# them to the previously used stream (i.e source)
for effect in sorted(config_effects_dict.keys()):
source = enabled_effects[len(enabled_effects) - 1]
# print("Effect: " + effect + ", Params: " + str(effects_dict[effect]))
params = config_effects_dict[effect]
if params['name'] == 'distortion':
# distortion stuff
print("Enable distortion effect")
enabled_effects.append(pyo.Disto(
source,
drive=float(params['drive']),
slope=float(params['slope']),
mul = vol
)
)
elif params['name'] == 'delay':
# delay stuff
print("Enable delay effect")
enabled_effects.append(pyo.Delay(
source,
delay=[0, float(params['delay'])],
feedback=float(params['feedback']),
maxdelay=5,
mul = vol
)
)
elif params['name'] == 'reverb':
# reverb stuff
print("Enable reverb effect")
enabled_effects.append(pyo.STRev(
source,
inpos=0.25,
revtime=float(params['revtime']),
cutoff=float(params['cutoff']),
bal=float(params['balance']),
roomSize=float(params['roomsize']),
mul = vol
)
)
elif params['name'] == 'chorus':
# chorus stuff
print("Enable chorus effect")
enabled_effects.append(pyo.Chorus(
source,
depth=[(params['depth_min']), (params['depth_max'])],
feedback=float(params['feedback']),
bal=float(params['balance']),
mul = vol
)
)
elif params['name'] == 'flanger':
# flanger stuff
print("Enable flanger effect")
enabled_effects.append(flanger.Flanger(
source,
depth=float(params['depth']),
freq=float(params['freq']),
feedback=float(params['feedback']),
mul = vol
)
)
elif params['name'] == 'freqshift':
# frequency shift stuff
print("Enable frequency shift effect")
enabled_effects.append(pyo.FreqShift(
source,
shift=params['shift'],
mul = vol
)
)
elif params['name'] == 'harmonizer':
# harmonizer stuff
print("Enable harmonizer effect")
enabled_effects.append(pyo.Harmonizer(
source,
transpo=params['transpose'],
feedback=float(params['feedback']),
winsize=0.1,
mul = vol
)
)
elif params['name'] == 'phaser':
# phaser stuff
print("Enable phaser effect")
enabled_effects.append(pyo.Phaser(
source,
freq=float(params['frequency']),
spread=float(params['spread']),
q=float(params['q']),
feedback=float(params['feedback']),
num=int(params['num']),
mul = vol
)
)
return enabled_effects
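# Illustrative shape of config_effects_dict, inferred from the handling above.
# Keys other than "volume" are sorted and applied in order; every value is a dict
# whose "name" selects the effect. The numbers below are example values only
# (real values come from configparser.get_effects()):
#
#     {
#         "volume": 0.8,
#         "effect0": {"name": "distortion", "drive": 0.75, "slope": 0.5},
#         "effect1": {"name": "delay", "delay": 0.25, "feedback": 0.5},
#     }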
def apply_effects( effects_list ):
'''Once an effects list has been assembled by chain_effects, use this function to enable it'''
effects_list[len(effects_list) - 1].out()
print("APPLIED EFFECTS: ", effects_list)
def signal_handler(jack_id, pyo_server, signal, frame):
'''Close the program and kill JACK appropriately'''
stop_pyo_server(pyo_server)
time.sleep(1)
jackserver.kill_jack_server(jack_id)
sys.exit(0)
def main():
'''Main method. Inits gpio, bridge, jack, and pyo. Then reads effects and starts handling gpio'''
# If GPIO is enabled, initialize the pins and GPIO module.
if GPIO_CAPABLE:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(button_pin, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(23, GPIO.OUT)
gpio_controller = gpiocontrol.GpioController()
# Initialize the bridge to allow the app to accept connections.
bridge_conn = bridge.Bridge()
# Set up custom options for the sockets
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #UDP SOCKET
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #TCP SOCKET
s.setblocking(0)
sock.setblocking(0)
s.bind(('', 10000))
sock.bind(('', 10001))
jack_id = jackserver.start_jack_server()
# give the application time for JACK to boot.
time.sleep(5)
# JACK and Pyo set up procedures
pyo_server = start_pyo_server()
pyo_server.setJackAuto()
# Read input from the audio device on channel 0
# and apply the necessary effects from the config file
enabled_effects = chain_effects(pyo.Input(chnl=0), configparser.get_effects())
apply_effects( enabled_effects )
# Create necessary variables used by the GPIO controller module
record_table = []
audio_recorder = []
loop = []
record_table.append(pyo.NewTable(length=60, chnls=1, feedback=0.5))
audio_recorder.append(pyo.TableRec((enabled_effects[len(enabled_effects) - 1]), table=record_table[-1], fadetime=0.05))
already_recording = False
recording_time = 0
inactive_end_time = 0
signal.signal(signal.SIGINT, partial(signal_handler, jack_id, pyo_server))
while True:
# Executes GPIO and loop machine logic flow.
if GPIO_CAPABLE:
# Read the state of the button press.
BUTTON_STATE = gpio_controller.update_gpio()
# Perform actions dependent on the state of the button press.
if BUTTON_STATE == 'INACTIVE' or BUTTON_STATE == 'LOOPING':
inactive_end_time = time.time()
if BUTTON_STATE == 'RECORDING':
recording_time = time.time()
if not already_recording:
print("Recording audios for 5 segundos")
(audio_recorder[-1]).play()
already_recording = True
elif BUTTON_STATE == 'ACTIVATE_LOOP':
loop_len = recording_time - inactive_end_time
loop.append(
pyo.Looper(
table=record_table[-1],
dur=loop_len, xfade=0,
mul=1).out()
)
record_table.append(
pyo.NewTable(
length=60,
chnls=1,
feedback=0.5)
)
audio_recorder.append(
pyo.TableRec(
(enabled_effects[len(enabled_effects) - 1]),
table=record_table[-1],
fadetime=0.05)
)
print("ACTIVATING LOOP")
gpio_controller.set_state("LOOPING")
already_recording = False
elif BUTTON_STATE == 'CLEAR_LOOP':
loop = []
record_table = []
audio_recorder = []
record_table.append(
pyo.NewTable(
length=60,
chnls=1,
feedback=0.5)
)
audio_recorder.append(
pyo.TableRec(
(enabled_effects[len(enabled_effects) - 1]),
table=record_table[-1],
fadetime=0.05)
)
gpio_controller.set_state("INACTIVE")
# See if we got a message from the frontend application
res = bridge_conn.backend(s,sock)
if res:
print(res)
if 'UPDATEPORT' == res[0]:
# There was a request to update the ports. Kill the
# JACK server and restart it with the new ports.
print("Request to update ports")
pyo_server.shutdown()
jackserver.kill_jack_server(jack_id)
time.sleep(2)
jack_id = jackserver.start_jack_server(jackserver.filter_port_selection(res[1]), jackserver.filter_port_selection(res[2]))
time.sleep(2)
pyo_server.reinit(**PYO_INIT_SETTINGS)
pyo_server.boot()
pyo_server.start()
enabled_effects[-1].stop()
enabled_effects = chain_effects(pyo.Input(chnl=0), configparser.get_effects())
apply_effects( enabled_effects )
time.sleep(0.0001)
if __name__ == "__main__":
main()
| gpl-3.0 | 3,335,811,969,404,656,000 | 32.457944 | 138 | 0.533799 | false |
yeming233/rally | rally/plugins/openstack/services/storage/cinder_common.py | 1 | 29782 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common.i18n import _
from rally import exceptions
from rally.plugins.openstack.services.image import image
from rally.plugins.openstack.services.storage import block
from rally.task import atomic
from rally.task import utils as bench_utils
CONF = block.CONF
class CinderMixin(object):
def _get_client(self):
return self._clients.cinder(self.version)
def _update_resource(self, resource):
try:
manager = getattr(resource, "manager", None)
if manager:
res = manager.get(resource.id)
else:
if isinstance(resource, block.Volume):
attr = "volumes"
elif isinstance(resource, block.VolumeSnapshot):
attr = "volume_snapshots"
elif isinstance(resource, block.VolumeBackup):
attr = "backups"
res = getattr(self._get_client(), attr).get(resource.id)
except Exception as e:
if getattr(e, "code", getattr(e, "http_status", 400)) == 404:
raise exceptions.GetResourceNotFound(resource=resource)
raise exceptions.GetResourceFailure(resource=resource, err=e)
return res
def _wait_available_volume(self, volume):
return bench_utils.wait_for_status(
volume,
ready_statuses=["available"],
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
def list_volumes(self, detailed=True):
"""List all volumes."""
aname = "cinder_v%s.list_volumes" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volumes.list(detailed)
def get_volume(self, volume_id):
"""Get target volume information."""
aname = "cinder_v%s.get_volume" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volumes.get(volume_id)
def delete_volume(self, volume):
"""Delete target volume."""
aname = "cinder_v%s.delete_volume" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().volumes.delete(volume)
bench_utils.wait_for_status(
volume,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=(CONF.benchmark
.cinder_volume_delete_poll_interval)
)
def extend_volume(self, volume, new_size):
"""Extend the size of the specified volume."""
if isinstance(new_size, dict):
new_size = random.randint(new_size["min"], new_size["max"])
aname = "cinder_v%s.extend_volume" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().volumes.extend(volume, new_size)
return self._wait_available_volume(volume)
def list_snapshots(self, detailed=True):
"""Get a list of all snapshots."""
aname = "cinder_v%s.list_snapshots" % self.version
with atomic.ActionTimer(self, aname):
return (self._get_client()
.volume_snapshots.list(detailed))
def set_metadata(self, volume, sets=10, set_size=3):
"""Set volume metadata.
:param volume: The volume to set metadata on
:param sets: how many operations to perform
:param set_size: number of metadata keys to set in each operation
:returns: A list of keys that were set
"""
key = "cinder_v%s.set_%s_metadatas_%s_times" % (self.version,
set_size,
sets)
with atomic.ActionTimer(self, key):
keys = []
for i in range(sets):
metadata = {}
for j in range(set_size):
key = self.generate_random_name()
keys.append(key)
metadata[key] = self.generate_random_name()
self._get_client().volumes.set_metadata(volume, metadata)
return keys
def delete_metadata(self, volume, keys, deletes=10, delete_size=3):
"""Delete volume metadata keys.
Note that ``len(keys)`` must be greater than or equal to
``deletes * delete_size``.
:param volume: The volume to delete metadata from
:param deletes: how many operations to perform
:param delete_size: number of metadata keys to delete in each operation
:param keys: a list of keys to choose deletion candidates from
"""
if len(keys) < deletes * delete_size:
raise exceptions.InvalidArgumentsException(
"Not enough metadata keys to delete: "
"%(num_keys)s keys, but asked to delete %(num_deletes)s" %
{"num_keys": len(keys),
"num_deletes": deletes * delete_size})
# make a shallow copy of the list of keys so that, when we pop
# from it later, we don't modify the original list.
keys = list(keys)
random.shuffle(keys)
action_name = ("cinder_v%s.delete_%s_metadatas_%s_times"
% (self.version, delete_size, deletes))
with atomic.ActionTimer(self, action_name):
for i in range(deletes):
to_del = keys[i * delete_size:(i + 1) * delete_size]
self._get_client().volumes.delete_metadata(volume, to_del)
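    # Illustrative pairing of the two metadata helpers above (hypothetical
    # usage; ``svc`` is any object mixing in CinderMixin):
    #
    #   keys = svc.set_metadata(volume, sets=2, set_size=3)    # 6 keys written
    #   svc.delete_metadata(volume, keys, deletes=2, delete_size=3)
    #
    # Here len(keys) == 6 == deletes * delete_size, so the deletion does not
    # raise InvalidArgumentsException.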
def update_readonly_flag(self, volume, read_only):
"""Update the read-only access mode flag of the specified volume.
:param volume: The UUID of the volume to update.
:param read_only: The value to indicate whether to update volume to
read-only access mode.
:returns: A tuple of http Response and body
"""
aname = "cinder_v%s.update_readonly_flag" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volumes.update_readonly_flag(
volume, read_only)
def upload_volume_to_image(self, volume, force=False,
container_format="bare", disk_format="raw"):
"""Upload the given volume to image.
Returns created image.
:param volume: volume object
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
:returns: Returns created image object
"""
aname = "cinder_v%s.upload_volume_to_image" % self.version
with atomic.ActionTimer(self, aname):
resp, img = self._get_client().volumes.upload_to_image(
volume, force, self.generate_random_name(), container_format,
disk_format)
# NOTE (e0ne): upload_to_image changes volume status to uploading
# so we need to wait until it will be available.
volume = self._wait_available_volume(volume)
image_id = img["os-volume_upload_image"]["image_id"]
glance = image.Image(self._clients)
image_inst = glance.get_image(image_id)
image_inst = bench_utils.wait_for_status(
image_inst,
ready_statuses=["active"],
update_resource=glance.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=(CONF.benchmark
.glance_image_create_poll_interval)
)
return image_inst
def create_qos(self, specs):
"""Create a qos specs.
:param specs: A dict of key/value pairs to be set
:rtype: :class:'QoSSpecs'
"""
aname = "cinder_v%s.create_qos" % self.version
name = self.generate_random_name()
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.create(name, specs)
def list_qos(self, search_opts=None):
"""Get a list of all qos specs.
:param search_opts: search options
:rtype: list of :class: 'QoSpecs'
"""
aname = "cinder_v%s.list_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.list(search_opts)
def get_qos(self, qos_id):
"""Get a specific qos specs.
:param qos_id: The ID of the :class: 'QoSSpecs' to get
:rtype: :class: 'QoSSpecs'
"""
aname = "cinder_v%s.get_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.get(qos_id)
def set_qos(self, qos_id, set_specs_args):
"""Add/Update keys in qos specs.
:param qos_id: The ID of the :class:`QoSSpecs` to get
:param set_specs_args: A dict of key/value pairs to be set
:rtype: class 'cinderclient.apiclient.base.DictWithMeta'
{"qos_specs": set_specs_args}
"""
aname = "cinder_v%s.set_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.set_keys(qos_id,
set_specs_args)
def qos_associate_type(self, qos_specs, vol_type_id):
"""Associate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param vol_type_id: The volume type id to be associated with
        :returns: True if the request has been accepted, based on the
                  client response
"""
aname = "cinder_v%s.qos_associate_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().qos_specs.associate(qos_specs,
vol_type_id)
return (tuple_res[0].status_code == 202)
def qos_disassociate_type(self, qos_specs, vol_type_id):
"""Disassociate qos specs from volume type.
:param qos_specs: The qos specs to be disassociated with
:param vol_type_id: The volume type id to be disassociated with
        :returns: True if the request has been accepted, based on the
                  client response
"""
aname = "cinder_v%s.qos_disassociate_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().qos_specs.disassociate(qos_specs,
vol_type_id)
return (tuple_res[0].status_code == 202)
def delete_snapshot(self, snapshot):
"""Delete the given snapshot.
Returns when the snapshot is actually deleted.
:param snapshot: snapshot object
"""
aname = "cinder_v%s.delete_snapshot" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().volume_snapshots.delete(snapshot)
bench_utils.wait_for_status(
snapshot,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=(CONF.benchmark
.cinder_volume_delete_poll_interval)
)
def delete_backup(self, backup):
"""Delete the given backup.
Returns when the backup is actually deleted.
:param backup: backup instance
"""
aname = "cinder_v%s.delete_backup" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().backups.delete(backup)
bench_utils.wait_for_status(
backup,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=(CONF.benchmark
.cinder_volume_delete_poll_interval)
)
def restore_backup(self, backup_id, volume_id=None):
"""Restore the given backup.
:param backup_id: The ID of the backup to restore.
:param volume_id: The ID of the volume to restore the backup to.
"""
aname = "cinder_v%s.restore_backup" % self.version
with atomic.ActionTimer(self, aname):
restore = self._get_client().restores.restore(backup_id, volume_id)
restored_volume = self._get_client().volumes.get(restore.volume_id)
return self._wait_available_volume(restored_volume)
def list_backups(self, detailed=True):
"""Return user volume backups list.
:param detailed: True if detailed information about backup
should be listed
"""
aname = "cinder_v%s.list_backups" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().backups.list(detailed)
def list_transfers(self, detailed=True, search_opts=None):
"""Get a list of all volume transfers.
:param detailed: If True, detailed information about transfer
should be listed
:param search_opts: Search options to filter out volume transfers
:returns: list of :class:`VolumeTransfer`
"""
aname = "cinder_v%s.list_transfers" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.list(detailed, search_opts)
def get_volume_type(self, volume_type):
"""get details of volume_type.
:param volume_type: The ID of the :class:`VolumeType` to get
:returns: :class:`VolumeType`
"""
aname = "cinder_v%s.get_volume_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_types.get(volume_type)
def delete_volume_type(self, volume_type):
"""delete a volume type.
:param volume_type: Name or Id of the volume type
        :returns: True if the request has been accepted, based on the
                  client response
"""
aname = "cinder_v%s.delete_volume_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().volume_types.delete(
volume_type)
return (tuple_res[0].status_code == 202)
def set_volume_type_keys(self, volume_type, metadata):
"""Set extra specs on a volume type.
:param volume_type: The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
:returns: extra_specs if the request has been accepted
"""
aname = "cinder_v%s.set_volume_type_keys" % self.version
with atomic.ActionTimer(self, aname):
return volume_type.set_keys(metadata)
def transfer_create(self, volume_id, name=None):
"""Create a volume transfer.
:param name: The name of created transfer
:param volume_id: The ID of the volume to transfer
:rtype: VolumeTransfer
"""
name = name or self.generate_random_name()
aname = "cinder_v%s.transfer_create" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.create(volume_id, name=name)
def transfer_accept(self, transfer_id, auth_key):
"""Accept a volume transfer.
:param transfer_id: The ID of the transfer to accept.
:param auth_key: The auth_key of the transfer.
:rtype: VolumeTransfer
"""
aname = "cinder_v%s.transfer_accept" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.accept(transfer_id, auth_key)
def create_encryption_type(self, volume_type, specs):
"""Create encryption type for a volume type. Default: admin only.
:param volume_type: the volume type on which to add an encryption type
:param specs: the encryption type specifications to add
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.create_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.create(
volume_type, specs)
def get_encryption_type(self, volume_type):
"""Get the volume encryption type for the specified volume type.
:param volume_type: the volume type to query
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.get_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.get(
volume_type)
def list_encryption_type(self, search_opts=None):
"""List all volume encryption types.
:param search_opts: Options used when search for encryption types
:return: a list of :class: VolumeEncryptionType instances
"""
aname = "cinder_v%s.list_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.list(
search_opts)
def delete_encryption_type(self, volume_type):
"""Delete the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be deleted
"""
aname = "cinder_v%s.delete_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
resp = self._get_client().volume_encryption_types.delete(
volume_type)
if (resp[0].status_code != 202):
raise exceptions.RallyException(
_("EncryptionType Deletion Failed"))
def update_encryption_type(self, volume_type, specs):
"""Update the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be updated
:param specs: the encryption type specifications to update
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.update_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.update(
volume_type, specs)
class UnifiedCinderMixin(object):
@staticmethod
def _unify_backup(backup):
return block.VolumeBackup(id=backup.id, name=backup.name,
volume_id=backup.volume_id,
status=backup.status)
@staticmethod
def _unify_transfer(transfer):
auth_key = transfer.auth_key if hasattr(transfer, "auth_key") else None
return block.VolumeTransfer(id=transfer.id, name=transfer.name,
volume_id=transfer.volume_id,
auth_key=auth_key)
@staticmethod
def _unify_qos(qos):
return block.QoSSpecs(id=qos.id, name=qos.name, specs=qos.specs)
@staticmethod
def _unify_encryption_type(encryption_type):
return block.VolumeEncryptionType(
id=encryption_type.encryption_id,
volume_type_id=encryption_type.volume_type_id)
def delete_volume(self, volume):
"""Delete a volume."""
self._impl.delete_volume(volume)
def set_metadata(self, volume, sets=10, set_size=3):
"""Update/Set a volume metadata.
:param volume: The updated/setted volume.
:param sets: how many operations to perform
:param set_size: number of metadata keys to set in each operation
:returns: A list of keys that were set
"""
return self._impl.set_metadata(volume, sets=sets, set_size=set_size)
def delete_metadata(self, volume, keys, deletes=10, delete_size=3):
"""Delete volume metadata keys.
Note that ``len(keys)`` must be greater than or equal to
``deletes * delete_size``.
:param volume: The volume to delete metadata from
:param deletes: how many operations to perform
:param delete_size: number of metadata keys to delete in each operation
:param keys: a list of keys to choose deletion candidates from
"""
        self._impl.delete_metadata(volume, keys=keys, deletes=deletes,
                                   delete_size=delete_size)
def update_readonly_flag(self, volume, read_only):
"""Update the read-only access mode flag of the specified volume.
:param volume: The UUID of the volume to update.
:param read_only: The value to indicate whether to update volume to
read-only access mode.
:returns: A tuple of http Response and body
"""
return self._impl.update_readonly_flag(volume, read_only=read_only)
def upload_volume_to_image(self, volume, force=False,
container_format="bare", disk_format="raw"):
"""Upload the given volume to image.
Returns created image.
:param volume: volume object
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
:returns: Returns created image object
"""
return self._impl.upload_volume_to_image(
volume, force=force, container_format=container_format,
disk_format=disk_format)
def create_qos(self, specs):
"""Create a qos specs.
:param specs: A dict of key/value pairs to be set
:rtype: :class:'QoSSpecs'
"""
return self._unify_qos(self._impl.create_qos(specs))
def list_qos(self, search_opts=None):
"""Get a list of all qos specs.
:param search_opts: search options
:rtype: list of :class: 'QoSpecs'
"""
return [self._unify_qos(qos)
for qos in self._impl.list_qos(search_opts)]
def get_qos(self, qos_id):
"""Get a specific qos specs.
:param qos_id: The ID of the :class: 'QoSSpecs' to get
:rtype: :class: 'QoSSpecs'
"""
return self._unify_qos(self._impl.get_qos(qos_id))
def set_qos(self, qos, set_specs_args):
"""Add/Update keys in qos specs.
:param qos: The instance of the :class:`QoSSpecs` to set
:param set_specs_args: A dict of key/value pairs to be set
:rtype: :class: 'QoSSpecs'
"""
self._impl.set_qos(qos.id, set_specs_args)
return self._unify_qos(qos)
def qos_associate_type(self, qos_specs, vol_type_id):
"""Associate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param vol_type_id: The volume type id to be associated with
"""
self._impl.qos_associate_type(qos_specs, vol_type_id)
return self._unify_qos(qos_specs)
def qos_disassociate_type(self, qos_specs, vol_type_id):
"""Disassociate qos specs from volume type.
:param qos_specs: The qos specs to be disassociated with
:param vol_type_id: The volume type id to be disassociated with
"""
self._impl.qos_disassociate_type(qos_specs, vol_type_id)
return self._unify_qos(qos_specs)
def delete_snapshot(self, snapshot):
"""Delete the given backup.
Returns when the backup is actually deleted.
:param backup: backup instance
"""
self._impl.delete_snapshot(snapshot)
def delete_backup(self, backup):
"""Delete a volume backup."""
self._impl.delete_backup(backup)
def list_backups(self, detailed=True):
"""Return user volume backups list."""
return [self._unify_backup(backup)
for backup in self._impl.list_backups(detailed=detailed)]
def list_transfers(self, detailed=True, search_opts=None):
"""Get a list of all volume transfers.
:param detailed: If True, detailed information about transfer
should be listed
:param search_opts: Search options to filter out volume transfers
:returns: list of :class:`VolumeTransfer`
"""
return [self._unify_transfer(transfer)
for transfer in self._impl.list_transfers(
detailed=detailed, search_opts=search_opts)]
def get_volume_type(self, volume_type):
"""get details of volume_type.
:param volume_type: The ID of the :class:`VolumeType` to get
:returns: :class:`VolumeType`
"""
return self._impl.get_volume_type(volume_type)
def delete_volume_type(self, volume_type):
"""delete a volume type.
:param volume_type: Name or Id of the volume type
        :returns: True if the request has been accepted, based on the
                  client response
"""
return self._impl.delete_volume_type(volume_type)
def set_volume_type_keys(self, volume_type, metadata):
"""Set extra specs on a volume type.
:param volume_type: The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
:returns: extra_specs if the request has been accepted
"""
return self._impl.set_volume_type_keys(volume_type, metadata)
def transfer_create(self, volume_id, name=None):
"""Creates a volume transfer.
:param name: The name of created transfer
:param volume_id: The ID of the volume to transfer.
:returns: Return the created transfer.
"""
return self._unify_transfer(
self._impl.transfer_create(volume_id, name=name))
def transfer_accept(self, transfer_id, auth_key):
"""Accept a volume transfer.
:param transfer_id: The ID of the transfer to accept.
:param auth_key: The auth_key of the transfer.
:returns: VolumeTransfer
"""
return self._unify_transfer(
self._impl.transfer_accept(transfer_id, auth_key=auth_key))
def create_encryption_type(self, volume_type, specs):
"""Create encryption type for a volume type. Default: admin only.
:param volume_type: the volume type on which to add an encryption type
:param specs: the encryption type specifications to add
:return: an instance of :class: VolumeEncryptionType
"""
return self._unify_encryption_type(
self._impl.create_encryption_type(volume_type, specs=specs))
def get_encryption_type(self, volume_type):
"""Get the volume encryption type for the specified volume type.
:param volume_type: the volume type to query
:return: an instance of :class: VolumeEncryptionType
"""
return self._unify_encryption_type(
self._impl.get_encryption_type(volume_type))
def list_encryption_type(self, search_opts=None):
"""List all volume encryption types.
:param search_opts: Options used when search for encryption types
:return: a list of :class: VolumeEncryptionType instances
"""
return [self._unify_encryption_type(encryption_type)
for encryption_type in self._impl.list_encryption_type(
search_opts=search_opts)]
def delete_encryption_type(self, volume_type):
"""Delete the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be deleted
"""
return self._impl.delete_encryption_type(volume_type)
def update_encryption_type(self, volume_type, specs):
"""Update the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be updated
:param specs: the encryption type specifications to update
:return: an instance of :class: VolumeEncryptionType
"""
return self._impl.update_encryption_type(volume_type, specs=specs)
| apache-2.0 | -218,852,773,355,620,600 | 39.853224 | 80 | 0.601874 | false |
tsauerwein/c2cgeoportal | c2cgeoportal/tests/xmlstr.py | 3 | 5978 | # -*- coding: utf-8 -*-
# Copyright (c) 2013-2014, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
getfeature = """
<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" service="WFS" version="1.1.0" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<wfs:Query typeName="feature:grundstueck" srsName="EPSG:2056" xmlns:feature="http://mapserver.gis.umn.edu/mapserver">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:PropertyIsLike matchCase="false" wildCard="*" singleChar="." escapeChar="!">
<ogc:PropertyName>nummer</ogc:PropertyName>
<ogc:Literal>10*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
feature = """
<gml:featureMember>
<ms:grundstueck>
<gml:boundedBy>
<gml:Envelope srsName="EPSG:2056">
<gml:lowerCorner>2626901.051818 1258035.790009</gml:lowerCorner>
<gml:upperCorner>2627050.862856 1258132.841364</gml:upperCorner>
</gml:Envelope>
</gml:boundedBy>
<ms:msGeometry>
<gml:LineString srsName="EPSG:2056">
<gml:posList srsDimension="2">2627033.201116 1258103.390372 2627034.048142 1258105.737388 2627010.821109 1258118.506850 2626985.111074 1258132.841364 2626980.135958 1258123.622322 2626978.010913 1258120.089309 2626966.170890 1258126.005538 2626949.985629 1258108.760552 2626924.919220 1258081.422566 2626910.187979 1258065.386575 2626901.051818 1258054.063564 2626935.224905 1258039.509934 2626956.098017 1258037.068626 2626971.167108 1258036.400415 2627000.949294 1258035.790009 2627018.708458 1258041.255835 2627029.967583 1258047.114753 2627048.056822 1258060.580669 2627050.862856 1258062.337652 2627048.942861 1258064.236700 2627036.107888 1258076.303014 2627023.360917 1258088.497329 2627028.596025 1258096.640354 2627033.201116 1258103.390372 </gml:posList>
</gml:LineString>
</ms:msGeometry>
<ms:gs_id>1676545</ms:gs_id>
<ms:lsn_oid>1510175178</ms:lsn_oid>
<ms:nummer>1071</ms:nummer>
<ms:gueltigkeit>rechtskräftig</ms:gueltigkeit>
<ms:art>Liegenschaft</ms:art>
<ms:gemeinde_id_bfs>2861</ms:gemeinde_id_bfs>
<ms:meta_id>1510</ms:meta_id>
<ms:flaechenmass>8774</ms:flaechenmass>
<ms:nummer_m_deko>1071</ms:nummer_m_deko>
<ms:nbident>BL0200002861</ms:nbident>
<ms:vollstaendigkeit>vollständig</ms:vollstaendigkeit>
<ms:datenherr>Jermann</ms:datenherr>
<ms:mut_nummer>pn18</ms:mut_nummer>
</ms:grundstueck>
</gml:featureMember>
"""
featurecollection_outlimit = """
<wfs:FeatureCollection xmlns:ms="http://mapserver.gis.umn.edu/mapserver" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wfs="http://www.opengis.net/wfs" xsi:schemaLocation="http://mapserver.gis.umn.edu/mapserver http://c2cpc29.camptocamp.com/sbrunner/mapserv?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=feature:grundstueck&OUTPUTFORMAT=text/xml;%20subtype=gml/3.1.1 http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:boundedBy>
<gml:Envelope srsName="EPSG:2056">
<gml:lowerCorner>2595270.118588 1244096.257242</gml:lowerCorner>
<gml:upperCorner>2638409.063753 1267658.751429</gml:upperCorner>
</gml:Envelope>
</gml:boundedBy>
""" + feature * 205 + """
</wfs:FeatureCollection>
"""
featurecollection_inlimit = """
<wfs:FeatureCollection xmlns:ms="http://mapserver.gis.umn.edu/mapserver" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wfs="http://www.opengis.net/wfs" xsi:schemaLocation="http://mapserver.gis.umn.edu/mapserver http://c2cpc29.camptocamp.com/sbrunner/mapserv?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=feature:grundstueck&OUTPUTFORMAT=text/xml;%20subtype=gml/3.1.1 http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:boundedBy>
<gml:Envelope srsName="EPSG:2056">
<gml:lowerCorner>2595270.118588 1244096.257242</gml:lowerCorner>
<gml:upperCorner>2638409.063753 1267658.751429</gml:upperCorner>
</gml:Envelope>
</gml:boundedBy>
""" + feature * 199 + """
</wfs:FeatureCollection>
"""
| bsd-2-clause | -5,369,876,525,444,830,000 | 60.608247 | 776 | 0.732932 | false |
ayepezv/GAD_ERP | openerp/__init__.py | 2 | 2641 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" OpenERP core library."""
#----------------------------------------------------------
# Running mode flags (gevent, prefork)
#----------------------------------------------------------
# Is the server running with gevent.
import sys
evented = False
if sys.modules.get("gevent") is not None:
evented = True
# Is the server running in prefork mode (e.g. behind Gunicorn).
# If this is True, the processes have to communicate some events,
# e.g. database update or cache invalidation. Each process has also
# its own copy of the data structure and we don't need to care about
# locks between threads.
multi_process = False
#----------------------------------------------------------
# libc UTC hack
#----------------------------------------------------------
# Make sure the OpenERP server runs in UTC. This is especially necessary
# under Windows as under Linux it seems the real import of time is
# sufficiently deferred so that setting the TZ environment variable
# in openerp.cli.server was working.
import os
os.environ['TZ'] = 'UTC' # Set the timezone...
import time # ... *then* import time.
del os
del time
#----------------------------------------------------------
# Shortcuts
#----------------------------------------------------------
# The hard-coded super-user id (a.k.a. administrator, or root user).
SUPERUSER_ID = 1
def registry(database_name=None):
"""
Return the model registry for the given database, or the database mentioned
on the current thread. If the registry does not exist yet, it is created on
the fly.
"""
if database_name is None:
import threading
database_name = threading.currentThread().dbname
return modules.registry.RegistryManager.get(database_name)
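# Minimal usage sketch (assumes a database named 'mydb' exists and the server
# environment is already initialised):
#
#   reg = registry('mydb')   # registry for an explicit database
#   reg = registry()         # registry for the current thread's dbname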
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
import addons
import conf
import loglevels
import modules
import netsvc
import osv
import pooler
import release
import report
import service
import sql_db
import tools
import workflow
#----------------------------------------------------------
# Model classes, fields, api decorators, and translations
#----------------------------------------------------------
from . import models
from . import fields
from . import api
from openerp.tools.translate import _
#----------------------------------------------------------
# Other imports, which may require stuff from above
#----------------------------------------------------------
import cli
import http
| gpl-3.0 | -7,602,556,953,469,250,000 | 31.604938 | 79 | 0.537675 | false |
bburan/psiexperiment | psi/controller/calibration/chirp_calibration.py | 1 | 1129 | from ..util import acquire
def chirp_power(engine, freq_lb=50, freq_ub=100e3, attenuation=0, vrms=1,
repetitions=32, duration=0.1, iti=0.01):
calibration = FlatCalibration.as_attenuation(vrms=vrms)
ai_fs = engine.hw_ai_channels[0].fs
ao_fs = engine.hw_ao_channels[0].fs
queue = FIFOSignalQueue(ao_fs)
factory = chirp_factory(ao_fs, freq_lb, freq_ub, duration, attenuation,
calibration=calibration)
waveform = generate_waveform(factory, int(duration*ao_fs))
print(waveform)
queue.append(waveform, repetitions, iti)
ao_channel = engine.hw_ao_channels[0]
    output = QueuedEpochOutput(parent=ao_channel, queue=queue,
auto_decrement=True)
epochs = acquire(engine, queue, duration+iti)
def tone_calibration(engine, frequencies, *args, **kwargs):
'''
Single output calibration at a fixed frequency
Returns
-------
sens : dB (V/Pa)
Sensitivity of output in dB (V/Pa).
'''
output_sens = tone_sens(engine, frequencies, *args, **kwargs)[0]
return PointCalibration(frequencies, output_sens)
| mit | -6,651,210,350,675,184,000 | 33.212121 | 75 | 0.643047 | false |
nathanielksmith/done | parsedatetime/tests/TestFrenchLocale.py | 1 | 4203 | #!/usr/bin/env python
"""
Test parsing of simple date and times using the French locale
Note: requires PyICU
"""
import unittest, time, datetime
import parsedatetime.parsedatetime as pt
import parsedatetime.parsedatetime_consts as ptc
# a special compare function is used to allow us to ignore the seconds as
# the running of the test could cross a minute boundary
def _compareResults(result, check):
target, t_flag = result
value, v_flag = check
t_yr, t_mth, t_dy, t_hr, t_min, _, _, _, _ = target
v_yr, v_mth, v_dy, v_hr, v_min, _, _, _, _ = value
return ((t_yr == v_yr) and (t_mth == v_mth) and (t_dy == v_dy) and
(t_hr == v_hr) and (t_min == v_min)) and (t_flag == v_flag)
class test(unittest.TestCase):
def setUp(self):
self.ptc = ptc.Constants('fr_FR', usePyICU=True)
self.cal = pt.Calendar(self.ptc)
self.yr, self.mth, self.dy, self.hr, self.mn, self.sec, self.wd, self.yd, self.isdst = time.localtime()
def testTimes(self):
if self.ptc.localeID == 'fr_FR':
start = datetime.datetime(self.yr, self.mth, self.dy, self.hr, self.mn, self.sec).timetuple()
target = datetime.datetime(self.yr, self.mth, self.dy, 23, 0, 0).timetuple()
self.assertTrue(_compareResults(self.cal.parse('2300', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('23:00', start), (target, 2)))
target = datetime.datetime(self.yr, self.mth, self.dy, 11, 0, 0).timetuple()
self.assertTrue(_compareResults(self.cal.parse('1100', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('11:00', start), (target, 2)))
target = datetime.datetime(self.yr, self.mth, self.dy, 7, 30, 0).timetuple()
self.assertTrue(_compareResults(self.cal.parse('730', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('0730', start), (target, 2)))
target = datetime.datetime(self.yr, self.mth, self.dy, 17, 30, 0).timetuple()
self.assertTrue(_compareResults(self.cal.parse('1730', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('173000', start), (target, 2)))
def testDates(self):
if self.ptc.localeID == 'fr_FR':
start = datetime.datetime(self.yr, self.mth, self.dy, self.hr, self.mn, self.sec).timetuple()
target = datetime.datetime(2006, 8, 25, self.hr, self.mn, self.sec).timetuple()
self.assertTrue(_compareResults(self.cal.parse('25/08/2006', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('25/8/06', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse(u'ao\xfbt 25, 2006', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse(u'ao\xfbt 25 2006', start), (target, 1)))
if self.mth > 8 or (self.mth == 8 and self.dy > 25):
target = datetime.datetime(self.yr+1, 8, 25, self.hr, self.mn, self.sec).timetuple()
else:
target = datetime.datetime(self.yr, 8, 25, self.hr, self.mn, self.sec).timetuple()
self.assertTrue(_compareResults(self.cal.parse('25/8', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('25/08', start), (target, 1)))
def testWeekDays(self):
if self.ptc.localeID == 'fr_FR':
start = datetime.datetime(self.yr, self.mth, self.dy, self.hr, self.mn, self.sec).timetuple()
o1 = self.ptc.CurrentDOWParseStyle
o2 = self.ptc.DOWParseStyle
# set it up so the current dow returns current day
self.ptc.CurrentDOWParseStyle = True
self.ptc.DOWParseStyle = 1
for i in range(0,7):
dow = self.ptc.shortWeekdays[i]
result = self.cal.parse(dow, start)
yr, mth, dy, hr, mn, sec, wd, yd, isdst = result[0]
self.assertTrue(wd == i)
self.ptc.CurrentDOWParseStyle = o1
self.ptc.DOWParseStyle = o2
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 6,624,456,152,510,218,000 | 40.205882 | 111 | 0.596241 | false |
wholmgren/pvlib-python | pvlib/test/conftest.py | 1 | 3981 | import inspect
import os
import platform
import numpy as np
import pandas as pd
from pkg_resources import parse_version
import pytest
import pvlib
pvlib_base_version = \
parse_version(parse_version(pvlib.__version__).base_version)
# decorator takes one argument: the base version for which it should fail
# for example @fail_on_pvlib_version('0.7') will cause a test to fail
# on pvlib versions 0.7a, 0.7b, 0.7rc1, etc.
# test function may not take args, kwargs, or fixtures.
def fail_on_pvlib_version(version):
# second level of decorator takes the function under consideration
def wrapper(func):
# third level defers computation until the test is called
# this allows the specific test to fail at test runtime,
# rather than at decoration time (when the module is imported)
def inner():
# fail if the version is too high
if pvlib_base_version >= parse_version(version):
pytest.fail('the tested function is scheduled to be '
'removed in %s' % version)
# otherwise return the function to be executed
else:
return func()
return inner
return wrapper
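# Hypothetical usage sketch for the decorator above:
#
#   @fail_on_pvlib_version('0.7')
#   def test_deprecated_in_07():
#       pass  # body exercising the functionality scheduled for removal
#
# On pvlib >= 0.7 the wrapper calls pytest.fail before the body runs; on
# earlier versions the test body executes normally.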
# commonly used directories in the tests
test_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
data_dir = os.path.join(test_dir, os.pardir, 'data')
has_python2 = parse_version(platform.python_version()) < parse_version('3')
platform_is_windows = platform.system() == 'Windows'
skip_windows = pytest.mark.skipif(platform_is_windows,
reason='does not run on windows')
try:
import scipy
has_scipy = True
except ImportError:
has_scipy = False
requires_scipy = pytest.mark.skipif(not has_scipy, reason='requires scipy')
try:
import tables
has_tables = True
except ImportError:
has_tables = False
requires_tables = pytest.mark.skipif(not has_tables, reason='requires tables')
try:
import ephem
has_ephem = True
except ImportError:
has_ephem = False
requires_ephem = pytest.mark.skipif(not has_ephem, reason='requires ephem')
def pandas_0_17():
return parse_version(pd.__version__) >= parse_version('0.17.0')
needs_pandas_0_17 = pytest.mark.skipif(
not pandas_0_17(), reason='requires pandas 0.17 or greater')
def numpy_1_10():
return parse_version(np.__version__) >= parse_version('1.10.0')
needs_numpy_1_10 = pytest.mark.skipif(
not numpy_1_10(), reason='requires numpy 1.10 or greater')
def pandas_0_22():
return parse_version(pd.__version__) >= parse_version('0.22.0')
needs_pandas_0_22 = pytest.mark.skipif(
not pandas_0_22(), reason='requires pandas 0.22 or greater')
def has_spa_c():
try:
from pvlib.spa_c_files.spa_py import spa_calc
except ImportError:
return False
else:
return True
requires_spa_c = pytest.mark.skipif(not has_spa_c(), reason="requires spa_c")
def has_numba():
try:
import numba
except ImportError:
return False
else:
vers = numba.__version__.split('.')
if int(vers[0] + vers[1]) < 17:
return False
else:
return True
requires_numba = pytest.mark.skipif(not has_numba(), reason="requires numba")
try:
import siphon
has_siphon = True
except ImportError:
has_siphon = False
requires_siphon = pytest.mark.skipif(not has_siphon,
reason='requires siphon')
try:
import netCDF4 # noqa: F401
has_netCDF4 = True
except ImportError:
has_netCDF4 = False
requires_netCDF4 = pytest.mark.skipif(not has_netCDF4,
reason='requires netCDF4')
try:
import pvfactors # noqa: F401
has_pvfactors = True
except ImportError:
has_pvfactors = False
requires_pvfactors = pytest.mark.skipif(not has_pvfactors,
reason='requires pvfactors')
| bsd-3-clause | -1,695,744,012,780,262,100 | 25.019608 | 78 | 0.645566 | false |
optikfluffel/lagesonum | lagesonum/bottle_app.py | 1 | 4024 | # coding: utf-8
import sqlite3
import os
import time
import bottle
from bottle import default_app, route, view
from bottle import request
from bottle_utils.i18n import I18NPlugin
#from bottle_utils.i18n import lazy_gettext as _
#todo: refactor so that there is no error in Py3 local deployment and testing
import input_number as ip
from dbhelper import initialize_database
import hashlib
MOD_PATH = os.path.dirname(os.path.abspath(__file__))
DB_PATH = os.path.abspath(os.path.join(MOD_PATH, '..', '..', "lagesonr.db"))
if not os.path.exists(DB_PATH):
initialize_database(DB_PATH)
lagesonrdb = sqlite3.connect(DB_PATH)
#todo: populate list dynamically based on available/selected translations
LANGS = [
('de_DE', 'Deutsch'),
('en_US', 'English'),
]
# ('ar_AR', 'Arab'),
DEFAULT_LOCALE = 'en_US'
@route('/')
@view('start_page')
def index():
"""1.Seite: Helfer steht am LaGeSo und gibt Nummern ein [_____] """
return {'entered': []}
@route('/', method='POST')
@view('start_page')
def do_enter():
numbers = request.forms.get('numbers')
timestamp = time.asctime()
numbers = [num.strip() for num in numbers.split('\n')]
result_num = []
#todo: refactor fingerprint in extra function for better testing
usr_agent = str(request.environ.get('HTTP_USER_AGENT'))
usr_lang = str(request.environ.get('HTTP_ACCEPT_LANGUAGE'))
usr_ip = str(request.remote_addr)
usr_fingerprint = usr_agent + usr_lang + usr_ip
usr_hash = hashlib.md5(usr_fingerprint.encode("utf-8")).hexdigest()
with lagesonrdb as con:
cur = con.cursor()
for num in set(numbers):
if ip.is_valid_number(num) and ip.is_ok_with_db(
num) and ip.is_valid_user():
num = str(num).capitalize()
query = 'SELECT NUMBER FROM NUMBERS WHERE NUMBER="%s" AND FINGERPRINT="%s"' % (num, usr_hash)
if len(list(cur.execute(query))) == 0:
insert = 'INSERT INTO NUMBERS(NUMBER, TIME, PLACE, USER, FINGERPRINT) VALUES ("%s", "%s", "-", ' \
'"-", "%s")' % (num, timestamp, usr_hash)
cur.execute(insert)
result_num.append(num)
else:
result_num.append("ALREADY ENTERED BY - %s - %s - %s: %s" % (usr_ip, usr_agent, usr_lang, num))
#return {'entered': ["already before - by you!"], 'timestamp': timestamp}
else:
result_num.append("INVALID INPUT: %s" % num)
return {'entered': result_num, 'timestamp': timestamp}
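# Possible refactoring of the fingerprint block above, as suggested by the
# todo comment (sketch only; the helper name is not part of the original app):
def usr_fingerprint_hash(environ, remote_addr):
    """Build the anonymised user fingerprint hash from request data."""
    usr_agent = str(environ.get('HTTP_USER_AGENT'))
    usr_lang = str(environ.get('HTTP_ACCEPT_LANGUAGE'))
    usr_fingerprint = usr_agent + usr_lang + str(remote_addr)
    return hashlib.md5(usr_fingerprint.encode("utf-8")).hexdigest()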
@route('/query')
@view('query_page')
def query():
return {'result': '-', 'timestamp_first': '-','timestamp_last': '-', 'n': '-'}
@route('/query', method='POST')
@view('query_page')
def do_query():
number = request.forms.get('number')
timestamp_first = '-'
timestamp_last = '-'
n = '0'
if ip.is_valid_number(number) and ip.is_ok_with_db(
number) and ip.is_valid_user():
with lagesonrdb as con:
cur = con.cursor()
number = str(number).capitalize()
query = 'SELECT TIME FROM NUMBERS WHERE NUMBER="%s" ORDER BY TIME' % number
result = list(cur.execute(query))
n = len(result)
if n > 0:
timestamp_first, timestamp_last = result[0][0], result[-1][0]
else:
timestamp_first = 'NOT FOUND'
else:
timestamp_first = 'INVALID INPUT'
return {'result': number, 'timestamp_first': timestamp_first,
'timestamp_last': timestamp_last, 'n': n}
@route('/about')
@view('about')
def about():
pass
@route('/impressum')
@view('impressum')
def impressum():
pass
# findet templates im gleichen Verzeichnis
bottle.TEMPLATE_PATH.append(MOD_PATH)
app = default_app()
application = I18NPlugin(app, langs=LANGS, default_locale=DEFAULT_LOCALE,
domain='messages',
locale_dir=os.path.join(MOD_PATH, 'locales'))
| mit | 4,199,236,689,896,837,600 | 30.193798 | 118 | 0.591451 | false |
shivam5992/pywordcloud-flask | words.py | 1 | 4499 | '''
Python implementation of HTML wordcloud of words collected from
a website, Paragraph Input or File Upload. Flask Web App implementation
of the same.
Author: Shivam Bansal
Email: [email protected]
Website: www.shivambansal.com
Version: 0.1
'''
from flask import Flask, render_template, request, flash, redirect, url_for
from BeautifulSoup import BeautifulSoup
import urllib, random, re, string, stopwords
app = Flask(__name__)
app.secret_key = 'You will never guess'
'''
Index router function, Receive post request and displays the html wordcloud
'''
@app.route('/', methods = ['GET','POST'])
@app.route('/index', methods = ['GET','POST'])
def index():
if request.method == 'POST':
''' Store post variables '''
url = request.form['urllink']
case = request.form['case']
show_freq = request.form['show_freq']
''' Try to connect with the URL '''
try:
if not url.startswith("http"):
url = "http://" + url
htmltext = urllib.urlopen(url).read()
except:
flash("Cannot connect to the requested url")
return redirect(url_for('startover'))
''' Get all text from the html repsonse '''
soup = BeautifulSoup(htmltext)
texts = soup.findAll(text=True)
visible_texts = filter(visible, texts)
article = ""
for text in visible_texts:
article += text.encode("utf-8")
article = str(article)
article = BeautifulSoup(article, convertEntities=BeautifulSoup.HTML_ENTITIES)
#exclude = set(string.punctuation)
#article = str(article)
#article = ''.join(ch for ch in article if ch not in exclude)
article = str(article).replace("\n"," ")
''' Get top keywords '''
freq = 50
a = getKeywords(article, case, freq)
random.shuffle(a)
b = [x[1] for x in a]
minFreq = min(b)
maxFreq = max(b)
''' Create html span tags and corresponding css '''
span = ""
css = """#box{font-family:'calibri';border:2px solid black;}
#box a{text-decoration : none}
"""
''' Colors for words in wordcloud '''
colors = ['#607ec5','#002a8b','#86a0dc','#4c6db9']
colsize = len(colors)
k = 0
for index,item in enumerate(a):
index += 1
if case == "upper":
tag = str(item[0]).upper()
else:
tag = str(item[0])
if show_freq == "yes":
span += '<a href=#><span class="word'+str(index)+'" id="tag'+str(index)+'"> ' + tag + " (" + str(item[1]) + ") " + " </span></a>\n"
else:
span += '<a href=#><span class="word'+str(index)+'" id="tag'+str(index)+'"> ' + tag + " </span></a>\n"
''' Algorithm to scale sizes'''
freqTag = int(item[1])
fontMax = 5.5
fontMin = 1.5
frange = fontMax - fontMin
C = 4
K = float(freqTag - minFreq)/(maxFreq - minFreq)
size = fontMin + (C*float(K*frange/C))
css += '#tag'+str(index)+'{font-size: '+ str(size) +'em;color: '+colors[int(k%colsize)]+'}\n'
css += '#tag'+str(index)+':hover{color: red}\n'
k += 1
''' Write the HTML and CSS into seperate files '''
f = open('templates/wordcloud.html', 'w')
message = """
<style type="text/css">
""" + css +"""
</style>
<div id='box'>
""" + span + """
</div>
"""
f.write(message)
f.close
f.flush()
return render_template('index.html')
startover()
return render_template('index.html')
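# Worked example of the font-size interpolation in index() above (hypothetical
# frequencies): with minFreq = 2, maxFreq = 22 and a tag seen 12 times,
# K = (12 - 2) / (22 - 2) = 0.5 and frange = 5.5 - 1.5 = 4.0, so
# size = 1.5 + 0.5 * 4.0 = 3.5em -- a plain linear interpolation between
# fontMin and fontMax.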
'''
Function to get top keywords from an article
'''
def getKeywords(articletext, case, freq):
''' Create the dictionary for output response '''
word_dict = {}
word_list = articletext.lower().split()
filtered_words = word_list
for word in filtered_words:
if word not in stopwords.stopwords and word.isalnum() and not word.isdigit() and not len(word) == 1:
			if word not in word_dict:
				word_dict[word] = 1
			else:
				word_dict[word] += 1
top_words = sorted(word_dict.items(),key=lambda(k,v):(v,k),reverse=True)[0:freq]
	''' Return a list of (word, frequency) tuples for the top keywords '''
top = []
for w in top_words:
top.append(w)
return top
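# Example of the returned structure (hypothetical input; assumes none of these
# words appear in stopwords.stopwords):
#
#   getKeywords("cloud words cloud python words cloud", "lower", 2)
#   -> [('cloud', 3), ('words', 2)]   # (word, frequency), most frequent first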
'''
Function to reset everything and start over
'''
@app.route('/startover')
def startover():
f = open("templates/wordcloud.html",'w')
f.write("")
	f.close()
return redirect(url_for('index'))
def visible(element):
if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
return False
elif re.match('<!--.*-->', str(element)):
return False
return True
'''
Run the Flask Application
'''
if __name__ == '__main__':
app.run(debug = True) | mit | 8,035,192,736,946,009,000 | 25.162791 | 145 | 0.625695 | false |
steveb/heat | heat/engine/clients/os/neutron/__init__.py | 1 | 6485 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutronclient.common import exceptions
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as nc
from oslo_utils import uuidutils
from heat.common import exception
from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client
class NeutronClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exceptions
service_types = [NETWORK] = ['network']
def _create(self):
con = self.context
endpoint_type = self._get_client_option('neutron', 'endpoint_type')
endpoint = self.url_for(service_type=self.NETWORK,
endpoint_type=endpoint_type)
args = {
'auth_url': con.auth_url,
'service_type': self.NETWORK,
'token': self.auth_token,
'endpoint_url': endpoint,
'endpoint_type': endpoint_type,
'ca_cert': self._get_client_option('neutron', 'ca_file'),
'insecure': self._get_client_option('neutron', 'insecure')
}
return nc.Client(**args)
def is_not_found(self, ex):
if isinstance(ex, (exceptions.NotFound,
exceptions.NetworkNotFoundClient,
exceptions.PortNotFoundClient)):
return True
return (isinstance(ex, exceptions.NeutronClientException) and
ex.status_code == 404)
def is_conflict(self, ex):
bad_conflicts = (exceptions.OverQuotaClient,)
return (isinstance(ex, exceptions.Conflict) and
not isinstance(ex, bad_conflicts))
def is_over_limit(self, ex):
if not isinstance(ex, exceptions.NeutronClientException):
return False
return ex.status_code == 413
def is_no_unique(self, ex):
return isinstance(ex, exceptions.NeutronClientNoUniqueMatch)
def is_invalid(self, ex):
return isinstance(ex, exceptions.StateInvalidClient)
def find_resourceid_by_name_or_id(self, resource, name_or_id,
cmd_resource=None):
return self._find_resource_id(self.context.tenant_id,
resource, name_or_id,
cmd_resource)
@os_client.MEMOIZE_FINDER
def _find_resource_id(self, tenant_id,
resource, name_or_id, cmd_resource):
# tenant id in the signature is used for the memoization key,
# that would differentiate similar resource names across tenants.
return neutronV20.find_resourceid_by_name_or_id(
self.client(), resource, name_or_id, cmd_resource=cmd_resource)
@os_client.MEMOIZE_EXTENSIONS
def _list_extensions(self):
extensions = self.client().list_extensions().get('extensions')
return set(extension.get('alias') for extension in extensions)
def has_extension(self, alias):
"""Check if specific extension is present."""
return alias in self._list_extensions()
def _resolve(self, props, key, id_key, key_type):
if props.get(key):
props[id_key] = self.find_resourceid_by_name_or_id(key_type,
props.pop(key))
return props[id_key]
def resolve_pool(self, props, pool_key, pool_id_key):
if props.get(pool_key):
props[pool_id_key] = self.find_resourceid_by_name_or_id(
'pool', props.get(pool_key), cmd_resource='lbaas_pool')
props.pop(pool_key)
return props[pool_id_key]
def resolve_router(self, props, router_key, router_id_key):
return self._resolve(props, router_key, router_id_key, 'router')
def network_id_from_subnet_id(self, subnet_id):
subnet_info = self.client().show_subnet(subnet_id)
return subnet_info['subnet']['network_id']
def check_lb_status(self, lb_id):
lb = self.client().show_loadbalancer(lb_id)['loadbalancer']
status = lb['provisioning_status']
if status == 'ERROR':
raise exception.ResourceInError(resource_status=status)
return status == 'ACTIVE'
def get_qos_policy_id(self, policy):
"""Returns the id of QoS policy.
Args:
policy: ID or name of the policy.
"""
return self.find_resourceid_by_name_or_id(
'policy', policy, cmd_resource='qos_policy')
def get_secgroup_uuids(self, security_groups):
'''Returns a list of security group UUIDs.
Args:
security_groups: List of security group names or UUIDs
'''
seclist = []
all_groups = None
for sg in security_groups:
if uuidutils.is_uuid_like(sg):
seclist.append(sg)
else:
if not all_groups:
response = self.client().list_security_groups()
all_groups = response['security_groups']
same_name_groups = [g for g in all_groups if g['name'] == sg]
groups = [g['id'] for g in same_name_groups]
if len(groups) == 0:
raise exception.EntityNotFound(entity='Resource', name=sg)
elif len(groups) == 1:
seclist.append(groups[0])
else:
# for admin roles, can get the other users'
# securityGroups, so we should match the tenant_id with
# the groups, and return the own one
own_groups = [g['id'] for g in same_name_groups
if g['tenant_id'] == self.context.tenant_id]
if len(own_groups) == 1:
seclist.append(own_groups[0])
else:
raise exception.PhysicalResourceNameAmbiguity(name=sg)
return seclist
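# Illustrative resolution behaviour (hypothetical names/IDs), with ``plugin``
# being a NeutronClientPlugin bound to the current context:
#
#   plugin.get_secgroup_uuids(['web', 'd670f1d0-...'])
#
# returns the UUID of the tenant's 'web' group followed by the UUID that was
# passed through unchanged; unknown names raise EntityNotFound, and names
# matching several groups resolve to the caller's own group when exactly one
# such match exists (otherwise PhysicalResourceNameAmbiguity is raised).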
| apache-2.0 | -6,405,323,272,617,855,000 | 38.542683 | 78 | 0.588743 | false |
gazbot/conference-project | models.py | 1 | 5513 | #!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionKeysWishlist = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
sessionKeysWishlist = messages.StringField(5, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty() # TODO: do we need for indexing like Java?
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class Session(ndb.Model):
"""Session -- Session object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
highlights = ndb.StringProperty(repeated=True)
startTime = ndb.TimeProperty()
sessionDate = ndb.DateProperty()
typeOfSession = ndb.StringProperty(default='NOT_SPECIFIED')
duration = ndb.IntegerProperty()
speaker = ndb.StringProperty(required=True)
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
highlights = messages.StringField(3, repeated=True)
startTime = messages.StringField(4)
sessionDate = messages.StringField(5)
typeOfSession = messages.EnumField('TypeOfSession', 6)
speaker = messages.StringField(7)
websafeKey = messages.StringField(8)
duration = messages.IntegerField(9)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class SessionForms(messages.Message):
"""SessionForms -- multiple Session outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class TypeOfSession(messages.Enum):
"""TypeOfSession -- session type enumeration value"""
NOT_SPECIFIED = 1
LECTURE = 2
KEYNOTE = 3
WORKSHOP = 4
FORUM = 5
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True) | apache-2.0 | -3,578,478,729,146,258,400 | 33.037037 | 87 | 0.6579 | false |
LeastAuthority/txkube | src/txkube/test/test_authentication.py | 1 | 15146 | # Copyright Least Authority Enterprises.
# See LICENSE for details.
import os
from itertools import count, islice
from uuid import uuid4
from pykube import KubeConfig
import pem
import attr
from pyrsistent import InvariantException
from hypothesis import given
from fixtures import TempDir
from zope.interface.verify import verifyObject
from testtools import ExpectedException
from testtools.matchers import (
AfterPreprocessing, Equals, Contains, IsInstance, raises
)
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from zope.interface import implementer
from twisted.python.compat import unicode
from twisted.python.filepath import FilePath
from twisted.internet.address import IPv4Address
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
IHostResolution,
IReactorPluggableNameResolver,
IOpenSSLClientConnectionCreator,
)
from twisted.internet.protocol import Factory
from twisted.web.iweb import IPolicyForHTTPS
from twisted.web.http_headers import Headers
from twisted.test.iosim import ConnectionCompleter
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from ..testing import TestCase, assertNoResult, cert
from ..testing.strategies import (
dns_subdomains,
port_numbers,
)
from .._authentication import (
ClientCertificatePolicyForHTTPS,
NetLocation,
Certificates,
Chain,
pairwise,
https_policy_from_config,
)
from .. import authenticate_with_serviceaccount
from ._compat import encode_environ
# Just an arbitrary certificate pulled off the internet. Details ought not
# matter. Retrieved using:
#
# $ openssl s_client -showcerts -connect google.com:443
#
_CA_CERT_PEM = b"""\
-----BEGIN CERTIFICATE-----
MIIDfTCCAuagAwIBAgIDErvmMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNVBAYTAlVT
MRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0
aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDIwNTIxMDQwMDAwWhcNMTgwODIxMDQwMDAw
WjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UE
AxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9m
OSm9BXiLnTjoBbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIu
T8rxh0PBFpVXLVDviS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6c
JmTM386DGXHKTubU1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmR
Cw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5asz
PeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo4HwMIHtMB8GA1UdIwQYMBaAFEjm
aPkr0rKV10fYIyAQTzOYkJ/UMB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrM
TjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjA6BgNVHR8EMzAxMC+g
LaArhilodHRwOi8vY3JsLmdlb3RydXN0LmNvbS9jcmxzL3NlY3VyZWNhLmNybDBO
BgNVHSAERzBFMEMGBFUdIAAwOzA5BggrBgEFBQcCARYtaHR0cHM6Ly93d3cuZ2Vv
dHJ1c3QuY29tL3Jlc291cmNlcy9yZXBvc2l0b3J5MA0GCSqGSIb3DQEBBQUAA4GB
AHbhEm5OSxYShjAGsoEIz/AIx8dxfmbuwu3UOx//8PDITtZDOLC5MH0Y0FWDomrL
NhGc6Ehmo21/uBPUR/6LWlxz/K7ZGzIZOKuXNBSqltLroxwUCEm2u+WR74M26x1W
b8ravHNjkOR/ez4iyz0H7V84dJzjA1BOoa+Y7mHyhD8S
-----END CERTIFICATE-----
"""
# Let hostname u"example.invalid" map to an
# IPv4 address in the TEST-NET range.
HOST_MAP = {
u"example.invalid.": "192.0.2.2"
}
def create_reactor():
"""
Twisted 17.1.0 and higher requires a reactor which implements
``IReactorPluggableNameResolver``.
"""
@implementer(IHostResolution)
@attr.s
class Resolution(object):
name = attr.ib()
class _FakeResolver(object):
def resolveHostName(self, resolutionReceiver, hostName, *args, **kwargs):
portNumber = kwargs.pop('portNumber')
r = Resolution(name=hostName)
resolutionReceiver.resolutionBegan(r)
if hostName in HOST_MAP:
resolutionReceiver.addressResolved(
IPv4Address('TCP', HOST_MAP[hostName], portNumber))
resolutionReceiver.resolutionComplete()
return r
@implementer(IReactorPluggableNameResolver)
class _ResolvingMemoryClockReactor(MemoryReactorClock):
nameResolver = _FakeResolver()
return _ResolvingMemoryClockReactor()
class AuthenticateWithServiceAccountTests(TestCase):
"""
Tests for ``authenticate_with_serviceaccount``.
"""
def _authorized_request(self, token, headers,
kubernetes_host=b"example.invalid."):
"""
Get an agent using ``authenticate_with_serviceaccount`` and issue a
request with it.
:return bytes: The bytes of the request the agent issues.
"""
server = AccumulatingProtocol()
factory = Factory.forProtocol(lambda: server)
factory.protocolConnectionMade = None
reactor = create_reactor()
reactor.listenTCP(80, factory)
t = FilePath(self.useFixture(TempDir()).path)
t = t.asBytesMode()
serviceaccount = t.child(b"serviceaccount")
serviceaccount.makedirs()
serviceaccount.child(b"ca.crt").setContent(_CA_CERT_PEM)
serviceaccount.child(b"token").setContent(token)
environ = encode_environ(
{
u"KUBERNETES_SERVICE_HOST": kubernetes_host.decode("ascii"),
u"KUBERNETES_SERVICE_PORT": u"443"
})
self.patch(os, "environ", environ)
agent = authenticate_with_serviceaccount(
reactor, path=serviceaccount.asTextMode().path,
)
d = agent.request(b"GET", b"http://" + kubernetes_host, headers)
assertNoResult(self, d)
[(host, port, factory, _, _)] = reactor.tcpClients
addr = HOST_MAP.get(kubernetes_host.decode("ascii"), None)
self.expectThat((host, port), Equals((addr, 80)))
pump = ConnectionCompleter(reactor).succeedOnce()
pump.pump()
return server.data
def test_bearer_token_authorization(self):
"""
The ``IAgent`` returned adds an *Authorization* header to each request it
issues. The header includes the bearer token from the service account
file.
"""
token = str(uuid4())
if isinstance(token, unicode):
token = token.encode("ascii")
request_bytes = self._authorized_request(token=token, headers=None)
# Sure would be nice to have an HTTP parser.
self.assertThat(
request_bytes,
Contains(b"Authorization: Bearer " + token),
)
def test_hostname_does_not_resolve(self):
"""
Specifying a hostname which cannot be resolved to an
IP address will result in an ``DNSLookupError``.
"""
with ExpectedException(DNSLookupError, "DNS lookup failed: no results "
"for hostname lookup: doesnotresolve."):
self._authorized_request(
token=b"test",
headers=Headers({}),
kubernetes_host=b"doesnotresolve"
)
def test_other_headers_preserved(self):
"""
Other headers passed to the ``IAgent.request`` implementation are also
sent in the request.
"""
token = str(uuid4())
if isinstance(token, unicode):
token = token.encode("ascii")
headers = Headers({u"foo": [u"bar"]})
request_bytes = self._authorized_request(token=token, headers=headers)
self.expectThat(
request_bytes,
Contains(b"Authorization: Bearer " + token),
)
self.expectThat(
request_bytes,
Contains(b"Foo: bar"),
)
class HTTPSPolicyFromConfigTests(TestCase):
"""
Tests for ``https_policy_from_config``.
"""
def test_policy(self):
"""
``https_policy_from_config`` returns a ``ClientCertificatePolicyForHTTPS``
with no credentials but with trust roots taken from the Kubernetes
*serviceaccount* directory it is pointed at. It also respects
*KUBERNETES_...* environment variables to identify the address of the
server.
"""
t = FilePath(self.useFixture(TempDir()).path)
t = t.asBytesMode()
serviceaccount = t.child(b"serviceaccount")
serviceaccount.makedirs()
serviceaccount.child(b"ca.crt").setContent(_CA_CERT_PEM)
serviceaccount.child(b"token").setContent(b"token")
netloc = NetLocation(host=u"example.invalid", port=443)
environ = encode_environ({
u"KUBERNETES_SERVICE_HOST": netloc.host,
u"KUBERNETES_SERVICE_PORT": u"{}".format(netloc.port),
})
self.patch(os, "environ", environ)
config = KubeConfig.from_service_account(path=serviceaccount.asTextMode().path)
policy = https_policy_from_config(config)
self.expectThat(
policy,
Equals(
ClientCertificatePolicyForHTTPS(
credentials={},
trust_roots={
netloc: pem.parse(_CA_CERT_PEM)[0],
},
),
),
)
def test_missing_ca_certificate(self):
"""
If no CA certificate is found in the service account directory,
``https_policy_from_config`` raises ``ValueError``.
"""
t = FilePath(self.useFixture(TempDir()).path)
t = t.asBytesMode()
serviceaccount = t.child(b"serviceaccount")
serviceaccount.makedirs()
serviceaccount.child(b"ca.crt").setContent(b"not a cert pem")
serviceaccount.child(b"token").setContent(b"token")
environ = encode_environ({
u"KUBERNETES_SERVICE_HOST": u"example.invalid.",
u"KUBERNETES_SERVICE_PORT": u"443",
})
self.patch(os, "environ", environ)
config = KubeConfig.from_service_account(path=serviceaccount.asTextMode().path)
self.assertThat(
lambda: https_policy_from_config(config),
raises(ValueError("No certificate authority certificate found.")),
)
def test_bad_ca_certificate(self):
"""
        If the file in the service account directory does not contain a valid CA
        certificate, ``https_policy_from_config`` raises ``ValueError``.
"""
t = FilePath(self.useFixture(TempDir()).path)
t = t.asBytesMode()
serviceaccount = t.child(b"serviceaccount")
serviceaccount.makedirs()
serviceaccount.child(b"ca.crt").setContent(
b"-----BEGIN CERTIFICATE-----\n"
b"not a cert pem\n"
b"-----END CERTIFICATE-----\n"
)
serviceaccount.child(b"token").setContent(b"token")
environ = encode_environ({
u"KUBERNETES_SERVICE_HOST": u"example.invalid.",
u"KUBERNETES_SERVICE_PORT": u"443",
})
self.patch(os, "environ", environ)
config = KubeConfig.from_service_account(path=serviceaccount.asTextMode().path)
self.assertThat(
lambda: https_policy_from_config(config),
raises(ValueError(
"Invalid certificate authority certificate found.",
"[('PEM routines', 'PEM_read_bio', 'bad base64 decode')]",
)),
)
class ClientCertificatePolicyForHTTPSTests(TestCase):
"""
Tests for ``ClientCertificatePolicyForHTTPS``.
"""
def test_interface(self):
"""
``ClientCertificatePolicyForHTTPS`` instances provide ``IPolicyForHTTPS``.
"""
policy = ClientCertificatePolicyForHTTPS(
credentials={},
trust_roots={},
)
verifyObject(IPolicyForHTTPS, policy)
@given(dns_subdomains(), dns_subdomains(), port_numbers(), port_numbers())
def test_creatorForNetLoc_interface(self, host_known, host_used, port_known, port_used):
"""
``ClientCertificatePolicyForHTTPS.creatorForNetloc`` returns an object
that provides ``IOpenSSLClientConnectionCreator``.
"""
netloc = NetLocation(host=host_known, port=port_known)
cert = pem.parse(_CA_CERT_PEM)[0]
policy = ClientCertificatePolicyForHTTPS(
credentials={},
trust_roots={
netloc: cert,
},
)
creator = policy.creatorForNetloc(
host_used.encode("ascii"),
port_used,
)
verifyObject(IOpenSSLClientConnectionCreator, creator)
class PairwiseTests(TestCase):
"""
Tests for ``pairwise``.
"""
def test_pairs(self):
a = object()
b = object()
c = object()
d = object()
self.expectThat(
pairwise([]),
AfterPreprocessing(list, Equals([])),
)
self.expectThat(
pairwise([a]),
AfterPreprocessing(list, Equals([])),
)
self.expectThat(
pairwise([a, b]),
AfterPreprocessing(list, Equals([(a, b)])),
)
self.expectThat(
pairwise([a, b, c]),
AfterPreprocessing(list, Equals([(a, b), (b, c)])),
)
self.expectThat(
pairwise([a, b, c, d]),
AfterPreprocessing(list, Equals([(a, b), (b, c), (c, d)])),
)
def test_lazy(self):
"""
``pairwise`` only consumes as much of its iterable argument as necessary
to satisfy iteration of its own result.
"""
self.expectThat(
islice(pairwise(count()), 3),
AfterPreprocessing(list, Equals([(0, 1), (1, 2), (2, 3)])),
)
class ChainTests(TestCase):
"""
Tests for ``Chain``.
"""
def test_empty(self):
"""
A ``Chain`` must have certificates.
"""
self.assertRaises(
InvariantException,
lambda: Chain(certificates=Certificates([])),
)
def test_ordering(self):
"""
Each certificate in ``Chain`` must be signed by the following certificate.
"""
a_key, b_key, c_key = tuple(
rsa.generate_private_key(
public_exponent=65537,
key_size=512,
backend=default_backend(),
)
for i in range(3)
)
a_cert = cert(u"a.invalid", u"a.invalid", a_key.public_key(), a_key, True)
b_cert = cert(u"a.invalid", u"b.invalid", b_key.public_key(), a_key, True)
c_cert = cert(u"b.invalid", u"c.invalid", c_key.public_key(), b_key, False)
a, b, c = pem.parse(b"\n".join(
cert.public_bytes(serialization.Encoding.PEM)
for cert
in (a_cert, b_cert, c_cert)
))
# a is not signed by b. Rather, the reverse. Therefore this ordering
# is an error.
self.expectThat(
lambda: Chain(certificates=Certificates([c, a, b])),
raises(InvariantException),
)
# c is signed by b and b is signed by a. Therefore this is perfect.
self.expectThat(
Chain(certificates=Certificates([c, b, a])),
IsInstance(Chain),
)
| mit | -7,816,677,622,489,357,000 | 31.363248 | 92 | 0.630794 | false |
sensbio/sensbiotk | examples/scripts/expe_prima.py | 1 | 4757 |
# -*- coding: utf-8 -*-
"""
Reconstruction angles example comparison
"""
import numpy as np
from sensbiotk.algorithms import martin_ahrs
from sensbiotk.algorithms.basic import find_static_periods
from sensbiotk.io.iofox import load_foxcsvfile
from sensbiotk.io.ahrs import save_ahrs_csvfile
import sensbiotk.calib.calib as calib
from sensbiotk.transforms3d import quaternions as nq
from sensbiotk.transforms3d.eulerangles import quat2euler
from sensbiotk.transforms3d.quaternions import quat2mat
from visual import *
import scipy.io
import matplotlib.pyplot as plt
DATACALIBFILE = "data/calib01_imu.csv"
CALIBFILE= "data/calib_imu.txt"
DATAFILE = "data/expe02_imu.csv"
ANGLEFILE = "data/angle02_imu.csv"
def plot_quat(title, timu, qw, qx, qy, qz):
""" Plot quaternion
"""
plt.figure()
plt.title(title+" Quaternion")
plt.plot(timu, qw)
plt.plot(timu, qx)
plt.plot(timu, qy)
plt.plot(timu, qz)
plt.legend(('qw', 'qx', 'qy', 'qz'))
return
def plot_euler(title, time, phi, theta, psi):
""" Plot euler angles
"""
plt.figure()
plt.title(title+" Euler angles")
plt.plot(time, phi*180/np.pi)
plt.plot(time, theta*180/np.pi)
plt.plot(time, psi*180/np.pi)
plt.legend(('e_x', 'e_y', 'e_z'))
return
def calib_param(compute = True):
""" Load or compute calibration parameters
"""
if compute == True :
[params_acc, params_mag, params_gyr] = \
calib.compute(imuNumber=5 ,filepath=DATACALIBFILE, param = 3)
calib.save_param(CALIBFILE,
params_acc, params_mag, params_gyr, comments="Expe Prima")
else:
[params_acc, params_mag, params_gyr] = \
calib.load_param(CALIBFILE)
return [params_acc, params_mag, params_gyr]
def normalize_data(data, param_calib):
""" normalize_data
"""
scale = param_calib[1:4,:]
bias = param_calib[0,:]
data_n = np.transpose(np.dot(scale,np.transpose((data-np.transpose(bias)))))
return data_n
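# Note (added for clarity): each param_calib block loaded by calib_param() is
# assumed to be a 4x3 array -- row 0 holding the sensor bias and rows 1-3 the 3x3
# scale/alignment matrix -- so normalize_data() applies scale.dot(sample - bias)
# to every individual sample of the recording.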
def run_example():
""" run example : "martin"
"""
    # Compute (True) or load (False) the calibration parameters
[params_acc, params_mag, params_gyr] = calib_param(compute = False)
# Load the recording data
[time_sens, accx, accy, accz, mx, my, mz, gyrx, gyry, gyrz] = \
load_foxcsvfile(DATAFILE)
# Find motionless begin periods
freqs = 200
start, end = find_static_periods(gyrz, 2 * np.pi/180, 3*freqs)
static_duration = time_sens[end[0]] - time_sens[start[0]]
print "LGHT", start[0], len(end)
if static_duration < 5.0 :
print "Warning: static duration too low"
time_imu = time_sens
acc_imu = np.column_stack([accx, accy, accz])
mag_imu = np.column_stack([mx, my, mz])
gyr_imu = np.column_stack([gyrx, gyry, gyrz])
# Init output
quat = np.zeros((len(acc_imu),4))
euler = np.zeros((len(acc_imu),3))
observer = martin_ahrs.martin_ahrs()
quat_offset = [1, 0, 0, 0]
# Initialization loop
for i in range(0, end[0]):
# Applies the Scale and Offset to data
acc_imu[i,:] = normalize_data(acc_imu[i,:], params_acc)
mag_imu[i,:] = normalize_data(mag_imu[i,:], params_mag)
gyr_imu[i,:] = normalize_data(gyr_imu[i,:], params_gyr)
# Filter call
if i == 0:
quat[0]=observer.init_observer(np.hstack([acc_imu[0,:],
mag_imu[0,:], gyr_imu[0,:]]))
else:
quat[i]=observer.update(np.hstack([acc_imu[i,:],
mag_imu[i,:], gyr_imu[i,:]]), 0.005)
quat_offset = nq.conjugate(quat[end-1][0])
print "Quaternion init", quat_offset
# Computation loop
for i in range(end[0], len(acc_imu)):
# Applies the Scale and Offset to data
acc_imu[i,:] = normalize_data(acc_imu[i,:], params_acc)
mag_imu[i,:] = normalize_data(mag_imu[i,:], params_mag)
gyr_imu[i,:] = normalize_data(gyr_imu[i,:], params_gyr)
# Filter call
quat[i]=observer.update(np.hstack([acc_imu[i,:],
mag_imu[i,:], gyr_imu[i,:]]), 0.005)
quat[i] = nq.mult(quat_offset, quat[i])
euler[i]=quat2euler(quat[i])
# Plot results
plot_quat("Expe Prima ", time_imu,\
quat[:,0], quat[:,1], quat[:,2], quat[:,3])
plot_euler("Expe Prima ", time_imu,\
euler[:,2], euler[:,1], euler[:,0])
# Save results
save_ahrs_csvfile(ANGLEFILE, time_imu, quat, euler)
if __name__ == '__main__':
run_example()
plt.show()
| gpl-3.0 | -5,332,932,333,264,223,000 | 31.737589 | 83 | 0.570107 | false |
Spandex-at-Exeter/demography_database | app/matrix_functions.py | 1 | 14553 | from models import Permission, Role, User, IUCNStatus, OrganismType, GrowthFormRaunkiaer, ReproductiveRepetition, \
DicotMonoc, AngioGymno, SpandExGrowthType, SourceType, Database, Purpose, MissingData, ContentEmail, Ecoregion, Continent, InvasiveStatusStudy, InvasiveStatusElsewhere, StageTypeClass, \
TransitionType, MatrixComposition, StartSeason, EndSeason, StudiedSex, Captivity, Species, Taxonomy, PurposeEndangered, PurposeWeed, Trait, \
Publication, AuthorContact, AdditionalSource, Population, Stage, StageType, Treatment, \
MatrixStage, MatrixValue, Matrix, Interval, Fixed, Small, CensusTiming, Institute, Status, Version, ChangeLogger
import numpy as np
#mat_str = "[0 0 2.81;0.5 0 0;0 0.45 0.45]" # for testing
def as_array(mat_str):
# input: matlab format matrix
    # output: square numpy array, or the string "NA" if parsing fails
try:
mat_str = mat_str[1:(len(mat_str)-1)].replace(";"," ").split()
mat_str = [float(i) for i in mat_str]
mat_str = np.array(mat_str)
order = int(np.sqrt(len(mat_str)))
shape = (order,order)
try:
mat_str = mat_str.reshape(shape)
return(mat_str)
except ValueError:
return("NA")
except:
return("NA")
def calc_lambda(matA):
matA = as_array(matA)
# input: matrix in string matlab format
# output: float
    if not isinstance(matA, str):  # as_array() returns the string "NA" on failure
        w, v = np.linalg.eig(matA)
        # the dominant eigenvalue of a non-negative projection matrix is real and
        # at least as large as the real part of every other eigenvalue
        return float(max(w.real))
else:
return(None)
def calc_surv_issue(matU):
matU = as_array(matU)
# input: matrix in string matlab format
# output: float
    if not isinstance(matU, str):  # as_array() returns the string "NA" on failure
column_sums = [sum([row[i] for row in matU]) for i in range(0,len(matU[0]))]
return max(column_sums)
else:
return(None)
def is_matrix_irreducible(matA):
matA = as_array(matA)
# input: matrix in string matlab format
# output: 0 or 1
    if not isinstance(matA, str):  # as_array() returns the string "NA" on failure
order = np.shape(matA)[0]
I = np.matrix(np.identity(order))
IplusA = I + matA
powermatrix = np.linalg.matrix_power(IplusA, (order - 1))
minval = powermatrix.min()
if minval > 0:
return(1)
else:
return(0)
else:
return(None)
def is_matrix_primitive(matA):
matA = as_array(matA)
# input: matrix in string matlab format
# output: 0 or 1
    if not isinstance(matA, str):  # as_array() returns the string "NA" on failure
order = np.shape(matA)[0]
powermatrix = np.linalg.matrix_power(matA,((order ** 2) - (2 * order) + 2))
minval = powermatrix.min()
if minval > 0:
return(1)
else:
return(0)
else:
return(None)
def is_matrix_ergodic(matA):
matA = as_array(matA)
# input: matrix in string matlab format
# output: 0 or 1
    if not isinstance(matA, str):  # as_array() returns the string "NA" on failure
        digits = 12
        order = np.shape(matA)[0]
        lw, lv = np.linalg.eig(np.transpose(matA))
        # index of the dominant eigenvalue; it is real for a non-negative matrix,
        # so comparing real parts avoids ordering complex numbers
        lmax = int(np.argmax(lw.real))
v = lv[:,lmax]
Rev = abs(np.real(v))
Rev = np.round(Rev,decimals = digits)
if min(Rev) > 0:
return(1)
else:
return(0)
else:
return(None)
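# Added illustration (not used by the application): exercises the checks above on
# the matlab-style test matrix quoted at the top of this module. The Django model
# imports above must be importable for the module to be run directly.
if __name__ == "__main__":
    demo = "[0 0 2.81;0.5 0 0;0 0.45 0.45]"
    print("lambda (dominant eigenvalue) = %s" % calc_lambda(demo))
    print("max column sum of U = %s" % calc_surv_issue(demo))
    print("irreducible=%s primitive=%s ergodic=%s" % (
        is_matrix_irreducible(demo), is_matrix_primitive(demo), is_matrix_ergodic(demo)))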
###### Some functions to create summary statistics on the front-end
####Structure of functions
###a. unreleased and incomplete
###b. unreleased and complete aka. ready for release
###c. released and complete
###d. released but missing stuff
##Each of these 4 categories is split into 3 subsections (all; compadre; comadre)
#Each of these 3 subsections is split into 3 sections (species;populations;matrices)
##### a. unreleased and incomplete (amber) ######
###Note these won't work yet until database is related to the Population.model which it isn't atm
## All ##
# #Species
# def all_species_unreleased():
# all_species_unreleased = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_unreleased
# #Populations
# def all_populations_unreleased():
# all_populations_unreleased = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_unreleased
# #Matrices
# def all_matrices_unreleased():
# all_matrices_unreleased = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_unreleased
## COMPADRE ##
#Species.join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
#def compadre_species_unreleased():
# compadre_species_unreleased = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Amber").join.(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").join(Population).join(Population.database).filter(Database.database_name=="Unreleased").count()
# return compadre_species_unreleased
#Populations
#Matrices
## COMADRE ##
#Species
#def comadre_species_unreleased():
# comadre_species_unreleased = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Amber").join.(Species.taxonomy).filter(Taxonomy.kingdom == "Animalia").join(Population).join(Population.database).filter(Database.database_name=="Unreleased").count()
# return comadre_species_unreleased
#Populations
#Matrices
# ##### b. unreleased and complete aka. ready for release (green) ######
# ## All ##
# #Species
# def all_species_unreleased_complete():
# all_species_unreleased_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_unreleased_complete
# #Populations
# def all_populations_unreleased_complete():
# all_populations_unreleased_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_unreleased_complete
# #Matrices
# def all_matrices_unreleased_complete():
# all_matrices_unreleased_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_unreleased_complete
## COMPADRE ##
#Species
#Populations
#Matrices
## COMADRE ##
#Species
# #Populations
# #Matrices
# ###c. released and complete
# ## ALL ##
# #Species
# def all_species_released_complete():
# all_species_released_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_released_complete
# #Populations
# def all_populations_released_complete():
# all_populations_released_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_released_complete
# #Matrices
# def all_matrices_released_complete():
# all_matrices_released_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_released_complete
# ## COMPADRE ## - when new versions of COMPADRE come out, these will need new versions added to get an accurate summary
# #Species
# def all_species_released_compadre():
# # all_species_released_2 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="3.2.1").count()
# # all_species_released_3 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="4.0.1").count()
# # all_species_released_4 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="3.0.0").count()
# all_species_released_compadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_released_compadre
# #Populations
# def all_populations_released_compadre():
# all_populations_released_compadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_released_compadre
# #Matrices
# def all_matrices_released_compadre():
# # all_matrices_released_2 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="3.2.1").count()
# # all_matrices_released_3 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="4.0.1").count()
# # all_matrices_released_4 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="3.0.0").count()
# all_matrices_released_compadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_released_compadre
# ## COMADRE ##
# #Species
# def all_species_released_comadre():
# # all_species_released_5 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="2.0.1").count()
# # all_species_released_6 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="1.0.0").count()
# all_species_released_comadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_released_comadre
# #Populations
# def all_populations_released_comadre():
# # all_populations_released_5 = Population.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Database).filter(Database.database_name=="2.0.1").count()
# # all_populations_released_6 = Population.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Database).filter(Database.database_name=="1.0.0").count()
# all_populations_released_comadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_released_comadre
# #Matrices
# def all_matrices_released_comadre():
# # all_matrices_released_5 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="2.0.1").count()
# # all_matrices_released_6 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="1.0.0").count()
# all_matrices_released_comadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_released_comadre
# ###d. released but missing stuff
## ALL ##
#Species
#Populations
#Matrices
## COMPADRE ##
#Species
# #Populations
# #Matrices
# ## COMADRE ##
# #Species
# #Populations
# ######Admin Use Only#######
# ###Count function for admin areas - Total sums###
# def all_matrices():
# all_matrices_count = Matrix.query.count()
# return all_matrices_count
# ##All_populations
# def all_pops():
# all_pops_count = Population.query.count()
# return all_pops_count
# ##All_species
# def all_species():
# all_species = Species.query.count()
# return all_species
# ##All. matrices in compadre (plants only)
# def count_plants():
# count_plants = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# return count_plants
# ##All. matrices in comadre (animalia only)
# def count_comadre():
# count_comadre = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Animalia").count()
# return count_comadre
# ##No. matrices in compadre (plants, fungi and algae)
# def count_compadre():
# count_fungi = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# count_chromista = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Chromista").count()
# count_chromalveolata = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Chromalveolata").count()
# count_compadre = count_plants() + count_fungi + count_chromista + count_chromalveolata
# return count_compadre
# ##No. populations in compadre (plants only)
# def count_plants_pop():
# count_plants_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# return count_plants_pop
# ##No. populations in compadre (plants, fungi and algae)
# def count_compadre_pop():
# count_chromista_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Chromista").count()
# count_chromalveolta_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Chromalveolata").count()
# count_fungi_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Fungi").count()
# count_compadre_pop = count_plants_pop() + count_chromalveolta_pop + count_chromista_pop + count_fungi_pop
# return count_compadre_pop
# ##No. populations in comadre (animalia only)
# def count_comadre_pop():
# count_comadre_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Animalia").count()
# return count_comadre_pop
# ##No. compadre species inc. fungi, algae, etc. admin
# def species_compadre_count():
# species_chromista_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Chromista").count()
# species_chromalveolta_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Chromalveolta").count()
# species_fungi_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Fungi").count()
# species_plant_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# species_compadre_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# return species_compadre_count
# ##No. comadre species admin
# def species_comadre_count():
# species_comadre_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Animalia").count()
# return species_comadre_count
| mit | 9,208,999,180,805,464,000 | 42.056213 | 279 | 0.698413 | false |
thepian/theapps | theapps/supervisor/sites.py | 1 | 1074 | from django.conf import settings
class SiteManager(object):
def __init__(self):
self.cur = None
# Map of Site instances
self.sites = {}
def get_current(self):
if not self.cur:
self.cur = Site()
return self.cur
def get_default_site(self):
return self.get_site('www.' + settings.DOMAINS[0])
def get_site(self,host):
if host in self.sites:
return self.sites[host]
#TODO consider domain redirection rules
site = Site()
site.domain = host
site.base_domain = settings.DOMAINS[0].startswith(".") and settings.DOMAINS[0] or "."+settings.DOMAINS[0]
        for d in settings.DOMAINS:
            # only adopt a domain that actually matches the requested host
            if host.endswith(d):
                site.base_domain = d
site.name = settings.SITE_TITLE
self.sites[host] = site
return site
class Site(object):
domain = "www.thepia.com"
name = "Thepia Site"
objects = SiteManager()
def __repr__(self):
return self.domain+":"+self.name
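# Example (added for illustration; assumes DOMAINS and SITE_TITLE are defined in
# settings): lookups go through the shared manager and are cached per host.
#
#   site = Site.objects.get_site("www.example.com")
#   site is Site.objects.get_site("www.example.com")   # True -- served from cache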
| gpl-3.0 | 8,910,944,919,843,968,000 | 25.85 | 113 | 0.555866 | false |
boldfield/s3-encryption | s3_encryption/crypto.py | 1 | 1913 | from Crypto import Random
from Crypto.Cipher import AES as pyAES
import codecs
class AES(object):
def __init__(self):
self.key = None
self._mode = None
self.iv = None
@staticmethod
def str_to_bytes(data):
t = type(b''.decode('utf-8'))
if isinstance(data, t):
return codecs.encode(data, 'utf-8')
return data
def encrypt(self, data):
if self.iv is None:
cipher = pyAES.new(self.key, self.mode)
else:
cipher = pyAES.new(self.key, self.mode, self.iv)
return cipher.encrypt(pad_data(AES.str_to_bytes(data)))
def decrypt(self, data):
if self.iv is None:
cipher = pyAES.new(self.key, self.mode)
else:
cipher = pyAES.new(self.key, self.mode, self.iv)
return unpad_data(cipher.decrypt(AES.str_to_bytes(data)))
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, mode):
m = 'MODE_{}'.format(mode.upper()) if not mode.startswith('MODE') else mode
self._mode = getattr(pyAES, m)
def aes_cipher(key=None, iv=None, mode=None):
aes = AES()
aes.iv = iv if iv else None
aes.mode = mode if mode else None
aes.key = key if key else None
return aes
def aes_encrypt(key, data, mode='ECB', iv=None):
aes = AES()
aes.mode = mode
aes.iv = iv
aes.key = key
return aes.encrypt(data)
def aes_decrypt(key, data, mode='ECB', iv=None):
aes = AES()
aes.mode = mode
aes.iv = iv
aes.key = key
return aes.decrypt(data)
def aes_iv():
return Random.new().read(pyAES.block_size)
def aes_key():
return Random.new().read(pyAES.block_size)
pad_data = lambda s: s + (pyAES.block_size - len(s) % pyAES.block_size) * AES.str_to_bytes(chr(pyAES.block_size - len(s) % pyAES.block_size))
unpad_data = lambda s: s[0:-ord(s[len(s)-1:])]
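# Added usage sketch (not part of the library): round-trip a short string through
# the helpers above with a random key and IV in CBC mode.
if __name__ == "__main__":
    key = aes_key()
    iv = aes_iv()
    encrypted = aes_encrypt(key, "s3-encryption demo", mode="CBC", iv=iv)
    decrypted = aes_decrypt(key, encrypted, mode="CBC", iv=iv)
    print(decrypted)  # b's3-encryption demo' (padding stripped on decryption)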
| bsd-3-clause | -4,556,593,097,893,904,400 | 23.844156 | 141 | 0.591218 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/azure_reachability_report_parameters.py | 1 | 2096 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureReachabilityReportParameters(Model):
"""Geographic and time constraints for Azure reachability report.
:param provider_location:
:type provider_location:
~azure.mgmt.network.v2017_11_01.models.AzureReachabilityReportLocation
:param providers: List of Internet service providers.
:type providers: list[str]
:param azure_locations: Optional Azure regions to scope the query to.
:type azure_locations: list[str]
:param start_time: The start time for the Azure reachability report.
:type start_time: datetime
:param end_time: The end time for the Azure reachability report.
:type end_time: datetime
"""
_validation = {
'provider_location': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'},
'providers': {'key': 'providers', 'type': '[str]'},
'azure_locations': {'key': 'azureLocations', 'type': '[str]'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(self, provider_location, start_time, end_time, providers=None, azure_locations=None):
super(AzureReachabilityReportParameters, self).__init__()
self.provider_location = provider_location
self.providers = providers
self.azure_locations = azure_locations
self.start_time = start_time
self.end_time = end_time
| mit | -2,620,773,727,717,192,700 | 40.098039 | 102 | 0.623569 | false |
Micronaet/micronaet-product | duty_management/__init__.py | 1 | 1034 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import duty
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,231,561,752,225,363,000 | 43.956522 | 79 | 0.609284 | false |
TheHonestGene/risk-predictor | setup.py | 1 | 1882 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='RiskPredictor',
version='0.0.1',
description='A library to predict risks',
long_description=long_description,
url='https://github.com/TheHonestGene/riskpredictor',
    author='Bjarni Vilhjalmsson, Uemit Seren',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='Risk Prediction',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
setup_requires=['pytest-runner'],
tests_require=[
"pytest",
"pytest-cov",
"pytest-pep8",
"coverage"
],
install_requires=[
"numpy",
"scipy",
"h5py",
"plinkio",
"pandas",
"imputor==0.0.1",
"matplotlib >= 1.4.3",
],
dependency_links=['https://github.com/TheHonestGene/imputor/tarball/master#egg=imputor-0.0.1'],
entry_points={
'console_scripts': [
'riskpredictor=riskpredictor:main'
],
},
)
| mit | -6,642,484,730,661,453,000 | 31.448276 | 99 | 0.605739 | false |
pwarren/AGDeviceControl | agdevicecontrol/thirdparty/site-packages/darwin/phidgets/servomotor.py | 1 | 3317 | # This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _servomotor
def _swig_setattr(self,class_type,name,value):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
self.__dict__[name] = value
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
class PhidgetServoMotor(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, PhidgetServoMotor, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, PhidgetServoMotor, name)
def __repr__(self):
return "<C PhidgetServoMotor instance at %s>" % (self.this,)
__swig_setmethods__["min_pulse"] = _servomotor.PhidgetServoMotor_min_pulse_set
__swig_getmethods__["min_pulse"] = _servomotor.PhidgetServoMotor_min_pulse_get
if _newclass:min_pulse = property(_servomotor.PhidgetServoMotor_min_pulse_get, _servomotor.PhidgetServoMotor_min_pulse_set)
__swig_setmethods__["max_pulse"] = _servomotor.PhidgetServoMotor_max_pulse_set
__swig_getmethods__["max_pulse"] = _servomotor.PhidgetServoMotor_max_pulse_get
if _newclass:max_pulse = property(_servomotor.PhidgetServoMotor_max_pulse_get, _servomotor.PhidgetServoMotor_max_pulse_set)
__swig_setmethods__["factor"] = _servomotor.PhidgetServoMotor_factor_set
__swig_getmethods__["factor"] = _servomotor.PhidgetServoMotor_factor_get
if _newclass:factor = property(_servomotor.PhidgetServoMotor_factor_get, _servomotor.PhidgetServoMotor_factor_set)
__swig_setmethods__["position"] = _servomotor.PhidgetServoMotor_position_set
__swig_getmethods__["position"] = _servomotor.PhidgetServoMotor_position_get
if _newclass:position = property(_servomotor.PhidgetServoMotor_position_get, _servomotor.PhidgetServoMotor_position_set)
def __init__(self, *args):
_swig_setattr(self, PhidgetServoMotor, 'this', _servomotor.new_PhidgetServoMotor(*args))
_swig_setattr(self, PhidgetServoMotor, 'thisown', 1)
def __del__(self, destroy=_servomotor.delete_PhidgetServoMotor):
try:
if self.thisown: destroy(self)
except: pass
class PhidgetServoMotorPtr(PhidgetServoMotor):
def __init__(self, this):
_swig_setattr(self, PhidgetServoMotor, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, PhidgetServoMotor, 'thisown', 0)
_swig_setattr(self, PhidgetServoMotor,self.__class__,PhidgetServoMotor)
_servomotor.PhidgetServoMotor_swigregister(PhidgetServoMotorPtr)
phidget_reset_PhidgetServoMotor = _servomotor.phidget_reset_PhidgetServoMotor
phidget_servomotor_set_parameters = _servomotor.phidget_servomotor_set_parameters
| gpl-2.0 | 6,873,438,704,202,684,000 | 45.71831 | 127 | 0.708472 | false |
skosukhin/spack | var/spack/repos/builtin/packages/xfontsel/package.py | 1 | 1912 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xfontsel(AutotoolsPackage):
"""xfontsel application provides a simple way to display the X11 core
protocol fonts known to your X server, examine samples of each, and
retrieve the X Logical Font Description ("XLFD") full name for a font."""
homepage = "http://cgit.freedesktop.org/xorg/app/xfontsel"
url = "https://www.x.org/archive/individual/app/xfontsel-1.0.5.tar.gz"
version('1.0.5', '72a35e7fa786eb2b0194d75eeb4a02e3')
depends_on('libxaw')
depends_on('libxmu')
depends_on('libxt')
depends_on('libx11')
depends_on('[email protected]:', type='build')
depends_on('util-macros', type='build')
| lgpl-2.1 | 4,634,601,404,021,808,000 | 42.454545 | 79 | 0.676255 | false |
snarasi/django-bookmarkApp | bookmark/urls.py | 1 | 1208 | """bookworm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import patterns, include
from django.conf.urls import url
from django.contrib import admin
from login.views import *
urlpatterns = [
url(r'^admin/', admin.site.urls),
#url(r'^$','bookwormApp.views.main',main),
url(r'^$', 'django.contrib.auth.views.login'),
url(r'^logout/$', logout_page),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^register/$', register),
url(r'^register/success/$', register_success),
url(r'^home/$', home),
url(r'^fileUpload/success/$', fileupload_success),
]
| gpl-3.0 | 6,150,545,808,403,012,000 | 35.606061 | 79 | 0.679636 | false |
brosner/django-sqlalchemy | django_sqlalchemy/management/sql.py | 1 | 1402 | from django.db.models.loading import get_models
from django.core.management.sql import custom_sql_for_model
from sqlalchemy import create_engine
from django_sqlalchemy.backend import metadata, session
def reset(engine, app):
metadata.drop_all(engine, tables=_get_tables_for_app(app))
session.commit()
def create(engine, app):
metadata.create_all(engine, tables=_get_tables_for_app(app))
session.commit()
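# Illustrative use of the two helpers above (added; assumes a standard Django app
# registry and a throwaway SQLAlchemy engine):
#
#   from django.db.models.loading import get_app
#   engine = create_engine('sqlite://')
#   create(engine, get_app('blog'))   # create tables for the 'blog' app
#   reset(engine, get_app('blog'))    # drop them again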
def _get_tables_for_app(app):
tables = []
for model in get_models(app):
tables.append(model.__table__)
tables.extend([f.__table__ for f in model._meta.local_many_to_many])
return tables
def process_custom_sql(models, verbosity):
    # install custom sql for the specified models, executing each statement
    # through the shared SQLAlchemy session used elsewhere in this module
    import sys
    for model in models:
        app_name = model._meta.app_label
        custom_sql = custom_sql_for_model(model)
        if custom_sql:
            if verbosity >= 1:
                print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
            try:
                for sql in custom_sql:
                    session.execute(sql)
            except Exception, e:
                sys.stderr.write("Failed to install custom SQL for %s.%s model: %s" % \
                        (app_name, model._meta.object_name, e))
                session.rollback()
            else:
                session.commit()
| bsd-3-clause | -8,844,244,939,070,740,000 | 36.891892 | 99 | 0.616976 | false |
TarasLevelUp/asynqp | src/asynqp/message.py | 1 | 8788 | import json
from collections import OrderedDict
from datetime import datetime
from io import BytesIO
from . import amqptypes
from . import serialisation
class Message(object):
"""
An AMQP Basic message.
Some of the constructor parameters are ignored by the AMQP broker and are provided
just for the convenience of user applications. They are marked "for applications"
in the list below.
:param body: :func:`bytes` , :class:`str` or :class:`dict` representing the body of the message.
Strings will be encoded according to the content_encoding parameter;
dicts will be converted to a string using JSON.
:param dict headers: a dictionary of message headers
:param str content_type: MIME content type
(defaults to 'application/json' if :code:`body` is a :class:`dict`,
or 'application/octet-stream' otherwise)
:param str content_encoding: MIME encoding (defaults to 'utf-8')
:param int delivery_mode: 1 for non-persistent, 2 for persistent
:param int priority: message priority - integer between 0 and 9
:param str correlation_id: correlation id of the message *(for applications)*
:param str reply_to: reply-to address *(for applications)*
:param str expiration: expiration specification *(for applications)*
:param str message_id: unique id of the message *(for applications)*
:param datetime.datetime timestamp: :class:`~datetime.datetime` of when the message was sent
(default: :meth:`datetime.now() <datetime.datetime.now>`)
:param str type: message type *(for applications)*
:param str user_id: ID of the user sending the message *(for applications)*
:param str app_id: ID of the application sending the message *(for applications)*
Attributes are the same as the constructor parameters.
"""
property_types = OrderedDict(
[("content_type", amqptypes.ShortStr),
("content_encoding", amqptypes.ShortStr),
("headers", amqptypes.Table),
("delivery_mode", amqptypes.Octet),
("priority", amqptypes.Octet),
("correlation_id", amqptypes.ShortStr),
("reply_to", amqptypes.ShortStr),
("expiration", amqptypes.ShortStr),
("message_id", amqptypes.ShortStr),
("timestamp", amqptypes.Timestamp),
("type", amqptypes.ShortStr),
("user_id", amqptypes.ShortStr),
("app_id", amqptypes.ShortStr)]
)
def __init__(self, body, *,
headers=None, content_type=None,
content_encoding=None, delivery_mode=None,
priority=None, correlation_id=None,
reply_to=None, expiration=None,
message_id=None, timestamp=None,
type=None, user_id=None,
app_id=None):
if content_encoding is None:
content_encoding = 'utf-8'
if isinstance(body, dict):
body = json.dumps(body)
if content_type is None:
content_type = 'application/json'
elif content_type is None:
content_type = 'application/octet-stream'
if isinstance(body, bytes):
self.body = body
else:
self.body = body.encode(content_encoding)
timestamp = timestamp if timestamp is not None else datetime.now()
self._properties = OrderedDict()
for name, amqptype in self.property_types.items():
value = locals()[name]
if value is not None:
value = amqptype(value)
self._properties[name] = value
def __eq__(self, other):
return (self.body == other.body
and self._properties == other._properties)
def __getattr__(self, name):
try:
return self._properties[name]
except KeyError as e:
raise AttributeError from e
def __setattr__(self, name, value):
amqptype = self.property_types.get(name)
if amqptype is not None:
self._properties[name] = value if isinstance(value, amqptype) else amqptype(value)
return
super().__setattr__(name, value)
def json(self):
"""
Parse the message body as JSON.
:return: the parsed JSON.
"""
return json.loads(self.body.decode(self.content_encoding))
class IncomingMessage(Message):
"""
A message that has been delivered to the client.
Subclass of :class:`Message`.
.. attribute::delivery_tag
The *delivery tag* assigned to this message by the AMQP broker.
.. attribute::exchange_name
The name of the exchange to which the message was originally published.
.. attribute::routing_key
The routing key under which the message was originally published.
"""
def __init__(self, *args, sender, delivery_tag, exchange_name, routing_key, **kwargs):
super().__init__(*args, **kwargs)
self.sender = sender
self.delivery_tag = delivery_tag
self.exchange_name = exchange_name
self.routing_key = routing_key
def ack(self):
"""
Acknowledge the message.
"""
self.sender.send_BasicAck(self.delivery_tag)
def reject(self, *, requeue=True):
"""
Reject the message.
:keyword bool requeue: if true, the broker will attempt to requeue the
message and deliver it to an alternate consumer.
"""
self.sender.send_BasicReject(self.delivery_tag, requeue)
def get_header_payload(message, class_id):
return ContentHeaderPayload(class_id, len(message.body), list(message._properties.values()))
# NB: the total frame size will be 8 bytes larger than frame_body_size
def get_frame_payloads(message, frame_body_size):
frames = []
remaining = message.body
while remaining:
frame = remaining[:frame_body_size]
remaining = remaining[frame_body_size:]
frames.append(frame)
return frames
class ContentHeaderPayload(object):
synchronous = True
def __init__(self, class_id, body_length, properties):
self.class_id = class_id
self.body_length = body_length
self.properties = properties
def __eq__(self, other):
return (self.class_id == other.class_id
and self.body_length == other.body_length
and self.properties == other.properties)
def write(self, stream):
stream.write(serialisation.pack_unsigned_short(self.class_id))
stream.write(serialisation.pack_unsigned_short(0)) # weight
stream.write(serialisation.pack_unsigned_long_long(self.body_length))
bytesio = BytesIO()
property_flags = 0
bitshift = 15
for val in self.properties:
if val is not None:
property_flags |= (1 << bitshift)
val.write(bytesio)
bitshift -= 1
stream.write(serialisation.pack_unsigned_short(property_flags))
stream.write(bytesio.getvalue())
@classmethod
def read(cls, raw):
bytesio = BytesIO(raw)
class_id = serialisation.read_unsigned_short(bytesio)
weight = serialisation.read_unsigned_short(bytesio)
assert weight == 0
body_length = serialisation.read_unsigned_long_long(bytesio)
property_flags_short = serialisation.read_unsigned_short(bytesio)
properties = []
for i, amqptype in enumerate(Message.property_types.values()):
            pos = 15 - i  # We started from `content_type` which has pos==15
if property_flags_short & (1 << pos):
properties.append(amqptype.read(bytesio))
else:
properties.append(None)
return cls(class_id, body_length, properties)
class MessageBuilder(object):
def __init__(self, sender, delivery_tag, redelivered, exchange_name, routing_key, consumer_tag=None):
self.sender = sender
self.delivery_tag = delivery_tag
self.body = b''
self.consumer_tag = consumer_tag
self.exchange_name = exchange_name
self.routing_key = routing_key
def set_header(self, header):
self.body_length = header.body_length
self.properties = {}
for name, prop in zip(IncomingMessage.property_types, header.properties):
self.properties[name] = prop
def add_body_chunk(self, chunk):
self.body += chunk
def done(self):
return len(self.body) == self.body_length
def build(self):
return IncomingMessage(
self.body,
sender=self.sender,
delivery_tag=self.delivery_tag,
exchange_name=self.exchange_name,
routing_key=self.routing_key,
**self.properties)
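# Added illustration (not part of the library): build an outgoing message the way
# application code would and show how its body is cut into frame-sized payloads.
if __name__ == '__main__':
    msg = Message({'greeting': 'hello'}, headers={'x-demo': 'yes'})
    print(msg.content_type)             # dict bodies default to 'application/json'
    print(msg.json())                   # body parsed back from JSON
    print(get_frame_payloads(msg, 4))   # body split into 4-byte chunks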
| mit | 9,189,294,022,764,453,000 | 34.152 | 105 | 0.624033 | false |
porksmash/swarfarm | bestiary/models.py | 1 | 75620 | from collections import OrderedDict
from functools import partial
from math import floor, ceil
from operator import is_not
from django.contrib.auth.models import User
from django.contrib.postgres.fields import ArrayField
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.utils.safestring import mark_safe
from django.utils.text import slugify
class Monster(models.Model):
ELEMENT_PURE = 'pure'
ELEMENT_FIRE = 'fire'
ELEMENT_WIND = 'wind'
ELEMENT_WATER = 'water'
ELEMENT_LIGHT = 'light'
ELEMENT_DARK = 'dark'
TYPE_ATTACK = 'attack'
TYPE_HP = 'hp'
TYPE_SUPPORT = 'support'
TYPE_DEFENSE = 'defense'
TYPE_MATERIAL = 'material'
TYPE_NONE = 'none'
ELEMENT_CHOICES = (
(ELEMENT_PURE, 'Pure'),
(ELEMENT_FIRE, 'Fire'),
(ELEMENT_WIND, 'Wind'),
(ELEMENT_WATER, 'Water'),
(ELEMENT_LIGHT, 'Light'),
(ELEMENT_DARK, 'Dark'),
)
TYPE_CHOICES = (
(TYPE_NONE, 'None'),
(TYPE_ATTACK, 'Attack'),
(TYPE_HP, 'HP'),
(TYPE_SUPPORT, 'Support'),
(TYPE_DEFENSE, 'Defense'),
(TYPE_MATERIAL, 'Material'),
)
STAR_CHOICES = (
(1, mark_safe('1<span class="glyphicon glyphicon-star"></span>')),
(2, mark_safe('2<span class="glyphicon glyphicon-star"></span>')),
(3, mark_safe('3<span class="glyphicon glyphicon-star"></span>')),
(4, mark_safe('4<span class="glyphicon glyphicon-star"></span>')),
(5, mark_safe('5<span class="glyphicon glyphicon-star"></span>')),
(6, mark_safe('6<span class="glyphicon glyphicon-star"></span>')),
)
name = models.CharField(max_length=40)
com2us_id = models.IntegerField(blank=True, null=True, help_text='ID given in game data files')
family_id = models.IntegerField(blank=True, null=True, help_text='Identifier that matches same family monsters')
image_filename = models.CharField(max_length=250, null=True, blank=True)
element = models.CharField(max_length=6, choices=ELEMENT_CHOICES, default=ELEMENT_FIRE)
archetype = models.CharField(max_length=10, choices=TYPE_CHOICES, default=TYPE_ATTACK)
base_stars = models.IntegerField(choices=STAR_CHOICES, help_text='Default stars a monster is summoned at')
obtainable = models.BooleanField(default=True, help_text='Is available for players to acquire')
can_awaken = models.BooleanField(default=True, help_text='Has an awakened form')
is_awakened = models.BooleanField(default=False, help_text='Is the awakened form')
awaken_bonus = models.TextField(blank=True, help_text='Bonus given upon awakening')
skills = models.ManyToManyField('Skill', blank=True)
skill_ups_to_max = models.IntegerField(null=True, blank=True, help_text='Number of skill-ups required to max all skills')
leader_skill = models.ForeignKey('LeaderSkill', on_delete=models.SET_NULL, null=True, blank=True)
# 1-star lvl 1 values from data source
raw_hp = models.IntegerField(null=True, blank=True, help_text='HP value from game data files')
raw_attack = models.IntegerField(null=True, blank=True, help_text='ATK value from game data files')
raw_defense = models.IntegerField(null=True, blank=True, help_text='DEF value from game data files')
# Base-star lvl MAX values as seen in-game
base_hp = models.IntegerField(null=True, blank=True, help_text='HP at base_stars lvl 1')
base_attack = models.IntegerField(null=True, blank=True, help_text='ATK at base_stars lvl 1')
base_defense = models.IntegerField(null=True, blank=True, help_text='DEF at base_stars lvl 1')
# 6-star lvl MAX values
max_lvl_hp = models.IntegerField(null=True, blank=True, help_text='HP at 6-stars lvl 40')
max_lvl_attack = models.IntegerField(null=True, blank=True, help_text='ATK at 6-stars lvl 40')
max_lvl_defense = models.IntegerField(null=True, blank=True, help_text='DEF at 6-stars lvl 40')
speed = models.IntegerField(null=True, blank=True)
crit_rate = models.IntegerField(null=True, blank=True)
crit_damage = models.IntegerField(null=True, blank=True)
resistance = models.IntegerField(null=True, blank=True)
accuracy = models.IntegerField(null=True, blank=True)
# Homunculus monster fields
homunculus = models.BooleanField(default=False)
craft_materials = models.ManyToManyField('CraftMaterial', through='MonsterCraftCost')
craft_cost = models.IntegerField(null=True, blank=True, help_text='Mana cost to craft this monster')
# Unicorn fields
transforms_into = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='+', help_text='Monster which this monster can transform into during battle')
awakens_from = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='+', help_text='Unawakened form of this monster')
awakens_to = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='+', help_text='Awakened form of this monster')
awaken_mats_fire_low = models.IntegerField(blank=True, default=0)
awaken_mats_fire_mid = models.IntegerField(blank=True, default=0)
awaken_mats_fire_high = models.IntegerField(blank=True, default=0)
awaken_mats_water_low = models.IntegerField(blank=True, default=0)
awaken_mats_water_mid = models.IntegerField(blank=True, default=0)
awaken_mats_water_high = models.IntegerField(blank=True, default=0)
awaken_mats_wind_low = models.IntegerField(blank=True, default=0)
awaken_mats_wind_mid = models.IntegerField(blank=True, default=0)
awaken_mats_wind_high = models.IntegerField(blank=True, default=0)
awaken_mats_light_low = models.IntegerField(blank=True, default=0)
awaken_mats_light_mid = models.IntegerField(blank=True, default=0)
awaken_mats_light_high = models.IntegerField(blank=True, default=0)
awaken_mats_dark_low = models.IntegerField(blank=True, default=0)
awaken_mats_dark_mid = models.IntegerField(blank=True, default=0)
awaken_mats_dark_high = models.IntegerField(blank=True, default=0)
awaken_mats_magic_low = models.IntegerField(blank=True, default=0)
awaken_mats_magic_mid = models.IntegerField(blank=True, default=0)
awaken_mats_magic_high = models.IntegerField(blank=True, default=0)
source = models.ManyToManyField('Source', blank=True, help_text='Where this monster can be acquired from')
farmable = models.BooleanField(default=False, help_text='Monster can be acquired easily without luck')
fusion_food = models.BooleanField(default=False, help_text='Monster is used as a fusion ingredient')
bestiary_slug = models.SlugField(max_length=255, editable=False, null=True)
def image_url(self):
if self.image_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/monsters/' + self.image_filename))
else:
return 'No Image'
def max_level_from_stars(self, stars=None):
if stars:
return 10 + stars * 5
else:
return 10 + self.base_stars * 5
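# Illustrative note (added, not part of the original module): the level cap grows
# linearly with grade, so max_level_from_stars(1) == 15, max_level_from_stars(3) == 25
# and max_level_from_stars(6) == 40, matching the 6-star lvl 40 fields above.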
def get_stats(self):
from collections import OrderedDict
start_grade = self.base_stars
stats_list = OrderedDict()
if self.is_awakened and self.base_stars > 1:
start_grade -= 1
for grade in range(start_grade, 7):
max_level = self.max_level_from_stars(grade)
# Add the actual calculated stats
stats_list[str(grade)] = {
'HP': self.actual_hp(grade, max_level),
'ATK': self.actual_attack(grade, max_level),
'DEF': self.actual_defense(grade, max_level),
}
return stats_list
def actual_hp(self, grade, level):
# Check that base stat exists first
if not self.raw_hp:
return None
else:
return self._calculate_actual_stat(self.raw_hp, grade, level) * 15
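# Note added for clarity (assumption inferred from the matching '* 15' factor in
# save() below): raw_hp from the game data files is stored at 1/15th of the
# displayed HP, so the interpolated value is scaled back up by 15 here.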
def actual_attack(self, grade=None, level=1):
# Check that base stat exists first
if not self.raw_attack:
return None
else:
# Fall back to this monster's own base grade when no grade is supplied
return self._calculate_actual_stat(self.raw_attack, grade or self.base_stars, level)
def actual_defense(self, grade=None, level=1):
# Check that base stat exists first
if not self.raw_defense:
return None
else:
# Fall back to this monster's own base grade when no grade is supplied
return self._calculate_actual_stat(self.raw_defense, grade or self.base_stars, level)
@staticmethod
def _calculate_actual_stat(stat, grade, level):
# Magic multipliers taken from summoner's war wikia calculator. Used to calculate stats for lvl 1 and lvl MAX
magic_multipliers = [
{'1': 1.0, 'max': 1.9958},
{'1': 1.5966, 'max': 3.03050646},
{'1': 2.4242774, 'max': 4.364426603},
{'1': 3.4914444, 'max': 5.941390935},
{'1': 4.7529032, 'max': 8.072330795},
{'1': 6.4582449, 'max': 10.97901633},
]
max_lvl = 10 + grade * 5
stat_lvl_1 = round(stat * magic_multipliers[grade - 1]['1'], 0)
stat_lvl_max = round(stat * magic_multipliers[grade - 1]['max'], 0)
if level == 1:
return int(stat_lvl_1)
elif level == max_lvl:
return int(stat_lvl_max)
else:
# Use exponential function in format value=ae^(bx)
# a=stat_lvl_1*e^(-b)
from math import log, exp
b_coeff = log(stat_lvl_max / stat_lvl_1) / (max_lvl - 1)
return int(round((stat_lvl_1 * exp(-b_coeff)) * exp(b_coeff * level)))
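# Worked example (illustrative values, not from the original source): for a raw
# stat of 100 at grade 3, stat_lvl_1 = round(100 * 2.4242774) = 242 and
# stat_lvl_max = round(100 * 4.364426603) = 436 at max_lvl 25; levels in between
# follow value = a * e^(b * level) with b = ln(436 / 242) / 24 and a = 242 * e^(-b).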
def monster_family(self):
should_be_shown = Q(obtainable=True) | Q(transforms_into__isnull=False)
family = Monster.objects.filter(family_id=self.family_id).filter(should_be_shown).order_by('element', 'is_awakened')
return [
family.filter(element=Monster.ELEMENT_FIRE).first(),
family.filter(element=Monster.ELEMENT_WATER).first(),
family.filter(element=Monster.ELEMENT_WIND).first(),
family.filter(element=Monster.ELEMENT_LIGHT).first(),
family.filter(element=Monster.ELEMENT_DARK).first(),
]
def all_skill_effects(self):
return SkillEffect.objects.filter(pk__in=self.skills.exclude(skill_effect=None).values_list('skill_effect', flat=True))
def get_awakening_materials(self):
mats = OrderedDict()
mats['magic'] = OrderedDict()
mats['magic']['low'] = self.awaken_mats_magic_low
mats['magic']['mid'] = self.awaken_mats_magic_mid
mats['magic']['high'] = self.awaken_mats_magic_high
mats['fire'] = OrderedDict()
mats['fire']['low'] = self.awaken_mats_fire_low
mats['fire']['mid'] = self.awaken_mats_fire_mid
mats['fire']['high'] = self.awaken_mats_fire_high
mats['water'] = OrderedDict()
mats['water']['low'] = self.awaken_mats_water_low
mats['water']['mid'] = self.awaken_mats_water_mid
mats['water']['high'] = self.awaken_mats_water_high
mats['wind'] = OrderedDict()
mats['wind']['low'] = self.awaken_mats_wind_low
mats['wind']['mid'] = self.awaken_mats_wind_mid
mats['wind']['high'] = self.awaken_mats_wind_high
mats['light'] = OrderedDict()
mats['light']['low'] = self.awaken_mats_light_low
mats['light']['mid'] = self.awaken_mats_light_mid
mats['light']['high'] = self.awaken_mats_light_high
mats['dark'] = OrderedDict()
mats['dark']['low'] = self.awaken_mats_dark_low
mats['dark']['mid'] = self.awaken_mats_dark_mid
mats['dark']['high'] = self.awaken_mats_dark_high
return mats
def clean(self):
# Update null values
if self.awaken_mats_fire_high is None:
self.awaken_mats_fire_high = 0
if self.awaken_mats_fire_mid is None:
self.awaken_mats_fire_mid = 0
if self.awaken_mats_fire_low is None:
self.awaken_mats_fire_low = 0
if self.awaken_mats_water_high is None:
self.awaken_mats_water_high = 0
if self.awaken_mats_water_mid is None:
self.awaken_mats_water_mid = 0
if self.awaken_mats_water_low is None:
self.awaken_mats_water_low = 0
if self.awaken_mats_wind_high is None:
self.awaken_mats_wind_high = 0
if self.awaken_mats_wind_mid is None:
self.awaken_mats_wind_mid = 0
if self.awaken_mats_wind_low is None:
self.awaken_mats_wind_low = 0
if self.awaken_mats_light_high is None:
self.awaken_mats_light_high = 0
if self.awaken_mats_light_mid is None:
self.awaken_mats_light_mid = 0
if self.awaken_mats_light_low is None:
self.awaken_mats_light_low = 0
if self.awaken_mats_dark_high is None:
self.awaken_mats_dark_high = 0
if self.awaken_mats_dark_mid is None:
self.awaken_mats_dark_mid = 0
if self.awaken_mats_dark_low is None:
self.awaken_mats_dark_low = 0
if self.awaken_mats_magic_high is None:
self.awaken_mats_magic_high = 0
if self.awaken_mats_magic_mid is None:
self.awaken_mats_magic_mid = 0
if self.awaken_mats_magic_low is None:
self.awaken_mats_magic_low = 0
super(Monster, self).clean()
def save(self, *args, **kwargs):
# Update null values
if self.awaken_mats_fire_high is None:
self.awaken_mats_fire_high = 0
if self.awaken_mats_fire_mid is None:
self.awaken_mats_fire_mid = 0
if self.awaken_mats_fire_low is None:
self.awaken_mats_fire_low = 0
if self.awaken_mats_water_high is None:
self.awaken_mats_water_high = 0
if self.awaken_mats_water_mid is None:
self.awaken_mats_water_mid = 0
if self.awaken_mats_water_low is None:
self.awaken_mats_water_low = 0
if self.awaken_mats_wind_high is None:
self.awaken_mats_wind_high = 0
if self.awaken_mats_wind_mid is None:
self.awaken_mats_wind_mid = 0
if self.awaken_mats_wind_low is None:
self.awaken_mats_wind_low = 0
if self.awaken_mats_light_high is None:
self.awaken_mats_light_high = 0
if self.awaken_mats_light_mid is None:
self.awaken_mats_light_mid = 0
if self.awaken_mats_light_low is None:
self.awaken_mats_light_low = 0
if self.awaken_mats_dark_high is None:
self.awaken_mats_dark_high = 0
if self.awaken_mats_dark_mid is None:
self.awaken_mats_dark_mid = 0
if self.awaken_mats_dark_low is None:
self.awaken_mats_dark_low = 0
if self.awaken_mats_magic_high is None:
self.awaken_mats_magic_high = 0
if self.awaken_mats_magic_mid is None:
self.awaken_mats_magic_mid = 0
if self.awaken_mats_magic_low is None:
self.awaken_mats_magic_low = 0
if self.raw_hp:
self.base_hp = self._calculate_actual_stat(
self.raw_hp,
self.base_stars,
self.max_level_from_stars(self.base_stars)
) * 15
self.max_lvl_hp = self.actual_hp(6, 40)
if self.raw_attack:
self.base_attack = self._calculate_actual_stat(
self.raw_attack,
self.base_stars,
self.max_level_from_stars(self.base_stars)
)
self.max_lvl_attack = self.actual_attack(6, 40)
if self.raw_defense:
self.base_defense = self._calculate_actual_stat(
self.raw_defense,
self.base_stars,
self.max_level_from_stars(self.base_stars)
)
self.max_lvl_defense = self.actual_defense(6, 40)
if self.is_awakened and self.awakens_from:
self.bestiary_slug = self.awakens_from.bestiary_slug
else:
if self.awakens_to is not None:
self.bestiary_slug = slugify(" ".join([str(self.com2us_id), self.element, self.name, self.awakens_to.name]))
else:
self.bestiary_slug = slugify(" ".join([str(self.com2us_id), self.element, self.name]))
# Pull info from unawakened version of this monster. This copying of data is one directional only
if self.awakens_from:
# Copy awaken bonus from unawakened version
if self.is_awakened and self.awakens_from.awaken_bonus:
self.awaken_bonus = self.awakens_from.awaken_bonus
super(Monster, self).save(*args, **kwargs)
# Automatically set awakens from/to relationship if none exists
if self.awakens_from and self.awakens_from.awakens_to is not self:
self.awakens_from.awakens_to = self
self.awakens_from.save()
elif self.awakens_to and self.awakens_to.awakens_from is not self:
self.awakens_to.awakens_from = self
self.awakens_to.save()
class Meta:
ordering = ['name', 'element']
def __str__(self):
if self.is_awakened:
return self.name
else:
return self.name + ' (' + self.element.capitalize() + ')'
class Skill(models.Model):
name = models.CharField(max_length=40)
com2us_id = models.IntegerField(blank=True, null=True, help_text='ID given in game data files')
description = models.TextField()
slot = models.IntegerField(default=1, help_text='Which button position the skill is in during battle')
skill_effect = models.ManyToManyField('SkillEffect', blank=True)
effect = models.ManyToManyField('SkillEffect', through='SkillEffectDetail', blank=True, related_name='effect', help_text='Detailed skill effect information')
cooltime = models.IntegerField(null=True, blank=True, help_text='Number of turns until skill can be used again')
hits = models.IntegerField(default=1, help_text='Number of times this skill hits an enemy')
aoe = models.BooleanField(default=False, help_text='Skill affects all enemies or allies')
passive = models.BooleanField(default=False, help_text='Skill activates automatically')
max_level = models.IntegerField()
level_progress_description = models.TextField(null=True, blank=True, help_text='Description of the bonus gained at each skill level')
icon_filename = models.CharField(max_length=100, null=True, blank=True)
multiplier_formula = models.TextField(null=True, blank=True, help_text='Parsed multiplier formula')
multiplier_formula_raw = models.CharField(max_length=150, null=True, blank=True, help_text='Multiplier formula given in game data files')
scaling_stats = models.ManyToManyField('ScalingStat', blank=True, help_text='Monster stats which this skill scales on')
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/skills/' + self.icon_filename))
else:
return 'No Image'
def level_progress_description_list(self):
return self.level_progress_description.splitlines() if self.level_progress_description else []
def __str__(self):
if self.name:
name = self.name
else:
name = ''
if self.icon_filename:
icon = ' - ' + self.icon_filename
else:
icon = ''
if self.com2us_id:
com2us_id = ' - ' + str(self.com2us_id)
else:
com2us_id = ''
return name + com2us_id + icon
class Meta:
ordering = ['slot', 'name']
verbose_name = 'Skill'
verbose_name_plural = 'Skills'
class LeaderSkill(models.Model):
ATTRIBUTE_HP = 1
ATTRIBUTE_ATK = 2
ATTRIBUTE_DEF = 3
ATTRIBUTE_SPD = 4
ATTRIBUTE_CRIT_RATE = 5
ATTRIBUTE_RESIST = 6
ATTRIBUTE_ACCURACY = 7
ATTRIBUTE_CRIT_DMG = 8
ATTRIBUTE_CHOICES = (
(ATTRIBUTE_HP, 'HP'),
(ATTRIBUTE_ATK, 'Attack Power'),
(ATTRIBUTE_DEF, 'Defense'),
(ATTRIBUTE_SPD, 'Attack Speed'),
(ATTRIBUTE_CRIT_RATE, 'Critical Rate'),
(ATTRIBUTE_RESIST, 'Resistance'),
(ATTRIBUTE_ACCURACY, 'Accuracy'),
(ATTRIBUTE_CRIT_DMG, 'Critical DMG'),
)
AREA_GENERAL = 1
AREA_DUNGEON = 2
AREA_ELEMENT = 3
AREA_ARENA = 4
AREA_GUILD = 5
AREA_CHOICES = (
(AREA_GENERAL, 'General'),
(AREA_DUNGEON, 'Dungeon'),
(AREA_ELEMENT, 'Element'),
(AREA_ARENA, 'Arena'),
(AREA_GUILD, 'Guild'),
)
attribute = models.IntegerField(choices=ATTRIBUTE_CHOICES, help_text='Monster stat which is granted the bonus')
amount = models.IntegerField(help_text='Amount of bonus granted')
area = models.IntegerField(choices=AREA_CHOICES, default=AREA_GENERAL, help_text='Where this leader skill has an effect')
element = models.CharField(max_length=6, null=True, blank=True, choices=Monster.ELEMENT_CHOICES, help_text='Element of monster which this leader skill applies to')
def skill_string(self):
if self.area == self.AREA_DUNGEON:
condition = 'in the Dungeons '
elif self.area == self.AREA_ARENA:
condition = 'in the Arena '
elif self.area == self.AREA_GUILD:
condition = 'in Guild Content '
elif self.area == self.AREA_ELEMENT:
condition = 'with {} attribute '.format(self.get_element_display())
else:
condition = ''
return "Increase the {0} of ally monsters {1}by {2}%".format(self.get_attribute_display(), condition, self.amount)
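# Example output (illustrative, field values assumed): attribute=ATTRIBUTE_ATK,
# amount=33, area=AREA_ARENA renders as
# "Increase the Attack Power of ally monsters in the Arena by 33%".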
def icon_filename(self):
if self.area == self.AREA_ELEMENT:
suffix = '_{}'.format(self.get_element_display())
elif self.area == self.AREA_GENERAL:
suffix = ''
else:
suffix = '_{}'.format(self.get_area_display())
return 'leader_skill_{0}{1}.png'.format(self.get_attribute_display().replace(' ', '_'), suffix)
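# Example (illustrative, same assumed values as above): the Arena attack leader
# skill maps to 'leader_skill_Attack_Power_Arena.png'; element-limited skills use
# the element name as the suffix and general skills use no suffix at all.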
def image_url(self):
return mark_safe('<img src="{}" height="42" width="42"/>'.format(
static('herders/images/skills/leader/' + self.icon_filename())
))
def __str__(self):
if self.area == self.AREA_ELEMENT:
condition = ' {}'.format(self.get_element_display())
elif self.area == self.AREA_GENERAL:
condition = ''
else:
condition = ' {}'.format(self.get_area_display())
return self.get_attribute_display() + ' ' + str(self.amount) + '%' + condition
class Meta:
ordering = ['attribute', 'amount', 'element']
verbose_name = 'Leader Skill'
verbose_name_plural = 'Leader Skills'
class SkillEffectBuffsManager(models.Manager):
def get_queryset(self):
return super(SkillEffectBuffsManager, self).get_queryset().values_list('pk', 'icon_filename').filter(is_buff=True).exclude(icon_filename='')
class SkillEffectDebuffsManager(models.Manager):
def get_queryset(self):
return super(SkillEffectDebuffsManager, self).get_queryset().values_list('pk', 'icon_filename').filter(is_buff=False).exclude(icon_filename='')
class SkillEffectOtherManager(models.Manager):
def get_queryset(self):
return super(SkillEffectOtherManager, self).get_queryset().filter(icon_filename='')
class SkillEffect(models.Model):
is_buff = models.BooleanField(default=True, help_text='Effect is beneficial to affected monster')
name = models.CharField(max_length=40)
description = models.TextField()
icon_filename = models.CharField(max_length=100, blank=True, default='')
objects = models.Manager()
class Meta:
ordering = ['name']
verbose_name = 'Skill Effect'
verbose_name_plural = 'Skill Effects'
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/buffs/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class SkillEffectDetail(models.Model):
skill = models.ForeignKey(Skill, on_delete=models.CASCADE)
effect = models.ForeignKey(SkillEffect, on_delete=models.CASCADE)
aoe = models.BooleanField(default=False, help_text='Effect applies to entire friendly or enemy group')
single_target = models.BooleanField(default=False, help_text='Effect applies to a single monster')
self_effect = models.BooleanField(default=False, help_text='Effect applies to the monster using the skill')
chance = models.IntegerField(null=True, blank=True, help_text='Chance of effect occurring per hit')
on_crit = models.BooleanField(default=False)
on_death = models.BooleanField(default=False)
random = models.BooleanField(default=False, help_text='Skill effect applies randomly to the target')
quantity = models.IntegerField(null=True, blank=True, help_text='Number of items this effect affects on the target')
all = models.BooleanField(default=False, help_text='This effect affects all items on the target')
self_hp = models.BooleanField(default=False, help_text="Amount of this effect is based on casting monster's HP")
target_hp = models.BooleanField(default=False, help_text="Amount of this effect is based on target monster's HP")
damage = models.BooleanField(default=False, help_text='Amount of this effect is based on damage dealt')
note = models.TextField(blank=True, null=True, help_text="Explain anything else that doesn't fit in other fields")
class ScalingStat(models.Model):
stat = models.CharField(max_length=20)
com2us_desc = models.CharField(max_length=30, null=True, blank=True)
description = models.TextField(null=True, blank=True)
def __str__(self):
return self.stat
class Meta:
ordering = ['stat',]
verbose_name = 'Scaling Stat'
verbose_name_plural = 'Scaling Stats'
class HomunculusSkill(models.Model):
skill = models.ForeignKey(Skill, on_delete=models.CASCADE)
monsters = models.ManyToManyField(Monster)
craft_materials = models.ManyToManyField('CraftMaterial', through='HomunculusSkillCraftCost', help_text='Crafting materials required to purchase')
mana_cost = models.IntegerField(default=0, help_text='Cost to purchase')
prerequisites = models.ManyToManyField(Skill, blank=True, related_name='homunculus_prereq', help_text='Skills which must be acquired first')
def __str__(self):
return '{} ({})'.format(self.skill, self.skill.com2us_id)
class Source(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
icon_filename = models.CharField(max_length=100, null=True, blank=True)
farmable_source = models.BooleanField(default=False)
meta_order = models.IntegerField(db_index=True, default=0)
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/icons/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class Meta:
ordering = ['meta_order', 'icon_filename', 'name']
class Fusion(models.Model):
product = models.ForeignKey('Monster', on_delete=models.CASCADE, related_name='product')
stars = models.IntegerField()
cost = models.IntegerField()
ingredients = models.ManyToManyField('Monster')
meta_order = models.IntegerField(db_index=True, default=0)
def __str__(self):
return str(self.product) + ' Fusion'
class Meta:
ordering = ['meta_order']
def sub_fusion_available(self):
return Fusion.objects.filter(product__in=self.ingredients.values_list('awakens_from__pk', flat=True)).exists()
def total_awakening_cost(self, owned_ingredients=None):
cost = {
'magic': {
'low': 0,
'mid': 0,
'high': 0,
},
'fire': {
'low': 0,
'mid': 0,
'high': 0,
},
'water': {
'low': 0,
'mid': 0,
'high': 0,
},
'wind': {
'low': 0,
'mid': 0,
'high': 0,
},
'light': {
'low': 0,
'mid': 0,
'high': 0,
},
'dark': {
'low': 0,
'mid': 0,
'high': 0,
},
}
if owned_ingredients:
qs = self.ingredients.exclude(pk__in=[o.monster.pk for o in owned_ingredients])
else:
qs = self.ingredients.all()
for ingredient in qs:
if ingredient.awakens_from:
cost['magic']['low'] += ingredient.awakens_from.awaken_mats_magic_low
cost['magic']['mid'] += ingredient.awakens_from.awaken_mats_magic_mid
cost['magic']['high'] += ingredient.awakens_from.awaken_mats_magic_high
cost['fire']['low'] += ingredient.awakens_from.awaken_mats_fire_low
cost['fire']['mid'] += ingredient.awakens_from.awaken_mats_fire_mid
cost['fire']['high'] += ingredient.awakens_from.awaken_mats_fire_high
cost['water']['low'] += ingredient.awakens_from.awaken_mats_water_low
cost['water']['mid'] += ingredient.awakens_from.awaken_mats_water_mid
cost['water']['high'] += ingredient.awakens_from.awaken_mats_water_high
cost['wind']['low'] += ingredient.awakens_from.awaken_mats_wind_low
cost['wind']['mid'] += ingredient.awakens_from.awaken_mats_wind_mid
cost['wind']['high'] += ingredient.awakens_from.awaken_mats_wind_high
cost['light']['low'] += ingredient.awakens_from.awaken_mats_light_low
cost['light']['mid'] += ingredient.awakens_from.awaken_mats_light_mid
cost['light']['high'] += ingredient.awakens_from.awaken_mats_light_high
cost['dark']['low'] += ingredient.awakens_from.awaken_mats_dark_low
cost['dark']['mid'] += ingredient.awakens_from.awaken_mats_dark_mid
cost['dark']['high'] += ingredient.awakens_from.awaken_mats_dark_high
return cost
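# Usage sketch (assumptions noted): owned_ingredients, when provided, is expected
# to be an iterable of objects exposing a .monster attribute (inferred from the
# o.monster.pk lookup above); the result is indexed like cost['fire']['high'] to
# read the remaining awakening material totals for this fusion.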
class Building(models.Model):
AREA_GENERAL = 0
AREA_GUILD = 1
AREA_CHOICES = [
(AREA_GENERAL, 'Everywhere'),
(AREA_GUILD, 'Guild Content'),
]
STAT_HP = 0
STAT_ATK = 1
STAT_DEF = 2
STAT_SPD = 3
STAT_CRIT_RATE_PCT = 4
STAT_CRIT_DMG_PCT = 5
STAT_RESIST_PCT = 6
STAT_ACCURACY_PCT = 7
MAX_ENERGY = 8
MANA_STONE_STORAGE = 9
MANA_STONE_PRODUCTION = 10
ENERGY_PRODUCTION = 11
ARCANE_TOWER_ATK = 12
ARCANE_TOWER_SPD = 13
STAT_CHOICES = [
(STAT_HP, 'HP'),
(STAT_ATK, 'ATK'),
(STAT_DEF, 'DEF'),
(STAT_SPD, 'SPD'),
(STAT_CRIT_RATE_PCT, 'CRI Rate'),
(STAT_CRIT_DMG_PCT, 'CRI Dmg'),
(STAT_RESIST_PCT, 'Resistance'),
(STAT_ACCURACY_PCT, 'Accuracy'),
(MAX_ENERGY, 'Max. Energy'),
(MANA_STONE_STORAGE, 'Mana Stone Storage'),
(MANA_STONE_PRODUCTION, 'Mana Stone Production Rate'),
(ENERGY_PRODUCTION, 'Energy Production Rate'),
(ARCANE_TOWER_ATK, 'Arcane Tower ATK'),
(ARCANE_TOWER_SPD, 'Arcane Tower SPD'),
]
PERCENT_STATS = [
STAT_HP,
STAT_ATK,
STAT_DEF,
STAT_SPD,
STAT_CRIT_RATE_PCT,
STAT_CRIT_DMG_PCT,
STAT_RESIST_PCT,
STAT_ACCURACY_PCT,
MANA_STONE_PRODUCTION,
ENERGY_PRODUCTION,
ARCANE_TOWER_ATK,
ARCANE_TOWER_SPD,
]
com2us_id = models.IntegerField()
name = models.CharField(max_length=30)
max_level = models.IntegerField()
area = models.IntegerField(choices=AREA_CHOICES, null=True, blank=True)
affected_stat = models.IntegerField(choices=STAT_CHOICES, null=True, blank=True)
element = models.CharField(max_length=6, choices=Monster.ELEMENT_CHOICES, blank=True, null=True)
stat_bonus = ArrayField(models.IntegerField(blank=True, null=True))
upgrade_cost = ArrayField(models.IntegerField(blank=True, null=True))
description = models.TextField(null=True, blank=True)
icon_filename = models.CharField(max_length=100, null=True, blank=True)
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/buildings/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class CraftMaterial(models.Model):
com2us_id = models.IntegerField()
name = models.CharField(max_length=40)
icon_filename = models.CharField(max_length=100, null=True, blank=True)
sell_value = models.IntegerField(blank=True, null=True)
source = models.ManyToManyField(Source, blank=True)
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/crafts/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class MonsterCraftCost(models.Model):
monster = models.ForeignKey(Monster, on_delete=models.CASCADE)
craft = models.ForeignKey(CraftMaterial, on_delete=models.CASCADE)
quantity = models.IntegerField()
def __str__(self):
return '{} - qty. {}'.format(self.craft.name, self.quantity)
class HomunculusSkillCraftCost(models.Model):
skill = models.ForeignKey(HomunculusSkill, on_delete=models.CASCADE)
craft = models.ForeignKey(CraftMaterial, on_delete=models.CASCADE)
quantity = models.IntegerField()
def __str__(self):
return '{} - qty. {}'.format(self.craft.name, self.quantity)
class RuneObjectBase:
# Provides basic rune related constants
TYPE_ENERGY = 1
TYPE_FATAL = 2
TYPE_BLADE = 3
TYPE_RAGE = 4
TYPE_SWIFT = 5
TYPE_FOCUS = 6
TYPE_GUARD = 7
TYPE_ENDURE = 8
TYPE_VIOLENT = 9
TYPE_WILL = 10
TYPE_NEMESIS = 11
TYPE_SHIELD = 12
TYPE_REVENGE = 13
TYPE_DESPAIR = 14
TYPE_VAMPIRE = 15
TYPE_DESTROY = 16
TYPE_FIGHT = 17
TYPE_DETERMINATION = 18
TYPE_ENHANCE = 19
TYPE_ACCURACY = 20
TYPE_TOLERANCE = 21
TYPE_CHOICES = (
(TYPE_ENERGY, 'Energy'),
(TYPE_FATAL, 'Fatal'),
(TYPE_BLADE, 'Blade'),
(TYPE_RAGE, 'Rage'),
(TYPE_SWIFT, 'Swift'),
(TYPE_FOCUS, 'Focus'),
(TYPE_GUARD, 'Guard'),
(TYPE_ENDURE, 'Endure'),
(TYPE_VIOLENT, 'Violent'),
(TYPE_WILL, 'Will'),
(TYPE_NEMESIS, 'Nemesis'),
(TYPE_SHIELD, 'Shield'),
(TYPE_REVENGE, 'Revenge'),
(TYPE_DESPAIR, 'Despair'),
(TYPE_VAMPIRE, 'Vampire'),
(TYPE_DESTROY, 'Destroy'),
(TYPE_FIGHT, 'Fight'),
(TYPE_DETERMINATION, 'Determination'),
(TYPE_ENHANCE, 'Enhance'),
(TYPE_ACCURACY, 'Accuracy'),
(TYPE_TOLERANCE, 'Tolerance'),
)
STAR_CHOICES = (
(1, 1),
(2, 2),
(3, 3),
(4, 4),
(5, 5),
(6, 6),
)
STAT_HP = 1
STAT_HP_PCT = 2
STAT_ATK = 3
STAT_ATK_PCT = 4
STAT_DEF = 5
STAT_DEF_PCT = 6
STAT_SPD = 7
STAT_CRIT_RATE_PCT = 8
STAT_CRIT_DMG_PCT = 9
STAT_RESIST_PCT = 10
STAT_ACCURACY_PCT = 11
# Used for selecting type of stat in form
STAT_CHOICES = (
(STAT_HP, 'HP'),
(STAT_HP_PCT, 'HP %'),
(STAT_ATK, 'ATK'),
(STAT_ATK_PCT, 'ATK %'),
(STAT_DEF, 'DEF'),
(STAT_DEF_PCT, 'DEF %'),
(STAT_SPD, 'SPD'),
(STAT_CRIT_RATE_PCT, 'CRI Rate %'),
(STAT_CRIT_DMG_PCT, 'CRI Dmg %'),
(STAT_RESIST_PCT, 'Resistance %'),
(STAT_ACCURACY_PCT, 'Accuracy %'),
)
# The STAT_DISPLAY dict is used to construct rune values for display as 'HP: 5%' rather than 'HP %: 5' using
# the built-in get_FOO_display() functions
STAT_DISPLAY = {
STAT_HP: 'HP',
STAT_HP_PCT: 'HP',
STAT_ATK: 'ATK',
STAT_ATK_PCT: 'ATK',
STAT_DEF: 'DEF',
STAT_DEF_PCT: 'DEF',
STAT_SPD: 'SPD',
STAT_CRIT_RATE_PCT: 'CRI Rate',
STAT_CRIT_DMG_PCT: 'CRI Dmg',
STAT_RESIST_PCT: 'Resistance',
STAT_ACCURACY_PCT: 'Accuracy',
}
PERCENT_STATS = [
STAT_HP_PCT,
STAT_ATK_PCT,
STAT_DEF_PCT,
STAT_CRIT_RATE_PCT,
STAT_CRIT_DMG_PCT,
STAT_RESIST_PCT,
STAT_ACCURACY_PCT,
]
FLAT_STATS = [
STAT_HP,
STAT_ATK,
STAT_DEF,
STAT_SPD,
]
QUALITY_NORMAL = 0
QUALITY_MAGIC = 1
QUALITY_RARE = 2
QUALITY_HERO = 3
QUALITY_LEGEND = 4
QUALITY_CHOICES = (
(QUALITY_NORMAL, 'Normal'),
(QUALITY_MAGIC, 'Magic'),
(QUALITY_RARE, 'Rare'),
(QUALITY_HERO, 'Hero'),
(QUALITY_LEGEND, 'Legend'),
)
class Rune(models.Model, RuneObjectBase):
MAIN_STAT_VALUES = {
# [stat][stars][level]: value
RuneObjectBase.STAT_HP: {
1: [40, 85, 130, 175, 220, 265, 310, 355, 400, 445, 490, 535, 580, 625, 670, 804],
2: [70, 130, 190, 250, 310, 370, 430, 490, 550, 610, 670, 730, 790, 850, 910, 1092],
3: [100, 175, 250, 325, 400, 475, 550, 625, 700, 775, 850, 925, 1000, 1075, 1150, 1380],
4: [160, 250, 340, 430, 520, 610, 700, 790, 880, 970, 1060, 1150, 1240, 1330, 1420, 1704],
5: [270, 375, 480, 585, 690, 795, 900, 1005, 1110, 1215, 1320, 1425, 1530, 1635, 1740, 2088],
6: [360, 480, 600, 720, 840, 960, 1080, 1200, 1320, 1440, 1560, 1680, 1800, 1920, 2040, 2448],
},
RuneObjectBase.STAT_HP_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 27, 29, 31, 33, 36, 43],
5: [8, 10, 12, 15, 17, 20, 22, 24, 27, 29, 32, 34, 37, 40, 43, 51],
6: [11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 63],
},
RuneObjectBase.STAT_ATK: {
1: [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 54],
2: [5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 73],
3: [7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 77, 92],
4: [10, 16, 22, 28, 34, 40, 46, 52, 58, 64, 70, 76, 82, 88, 94, 112],
5: [15, 22, 29, 36, 43, 50, 57, 64, 71, 78, 85, 92, 99, 106, 113, 135],
6: [22, 30, 38, 46, 54, 62, 70, 78, 86, 94, 102, 110, 118, 126, 134, 160],
},
RuneObjectBase.STAT_ATK_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 27, 29, 31, 33, 36, 43],
5: [8, 10, 12, 15, 17, 20, 22, 24, 27, 29, 32, 34, 37, 40, 43, 51],
6: [11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 63],
},
RuneObjectBase.STAT_DEF: {
1: [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 54],
2: [5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 73],
3: [7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 77, 92],
4: [10, 16, 22, 28, 34, 40, 46, 52, 58, 64, 70, 76, 82, 88, 94, 112],
5: [15, 22, 29, 36, 43, 50, 57, 64, 71, 78, 85, 92, 99, 106, 113, 135],
6: [22, 30, 38, 46, 54, 62, 70, 78, 86, 94, 102, 110, 118, 126, 134, 160],
},
RuneObjectBase.STAT_DEF_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 27, 29, 31, 33, 36, 43],
5: [8, 10, 12, 15, 17, 20, 22, 24, 27, 29, 32, 34, 37, 40, 43, 51],
6: [11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 63],
},
RuneObjectBase.STAT_SPD: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [3, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 19, 21, 25],
4: [4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, 25, 30],
5: [5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 39],
6: [7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 42],
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 37],
4: [4, 6, 8, 11, 13, 15, 17, 19, 22, 24, 26, 28, 30, 33, 35, 41],
5: [5, 7, 10, 12, 15, 17, 19, 22, 24, 27, 29, 31, 34, 36, 39, 47],
6: [7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 43, 46, 49, 58],
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
1: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
2: [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 37],
3: [4, 6, 9, 11, 13, 16, 18, 20, 22, 25, 27, 29, 32, 34, 36, 43],
4: [6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 57],
5: [8, 11, 15, 18, 21, 25, 28, 31, 34, 38, 41, 44, 48, 51, 54, 65],
6: [11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63, 67, 80],
},
RuneObjectBase.STAT_RESIST_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [6, 8, 10, 13, 15, 17, 19, 21, 24, 26, 28, 30, 32, 35, 37, 44],
5: [9, 11, 14, 16, 19, 21, 23, 26, 28, 31, 33, 35, 38, 40, 43, 51],
6: [12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 64],
},
RuneObjectBase.STAT_ACCURACY_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [6, 8, 10, 13, 15, 17, 19, 21, 24, 26, 28, 30, 32, 35, 37, 44],
5: [9, 11, 14, 16, 19, 21, 23, 26, 28, 31, 33, 35, 38, 40, 43, 51],
6: [12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 64],
},
}
MAIN_STATS_BY_SLOT = {
1: [
RuneObjectBase.STAT_ATK,
],
2: [
RuneObjectBase.STAT_ATK,
RuneObjectBase.STAT_ATK_PCT,
RuneObjectBase.STAT_DEF,
RuneObjectBase.STAT_DEF_PCT,
RuneObjectBase.STAT_HP,
RuneObjectBase.STAT_HP_PCT,
RuneObjectBase.STAT_SPD,
],
3: [
RuneObjectBase.STAT_DEF,
],
4: [
RuneObjectBase.STAT_ATK,
RuneObjectBase.STAT_ATK_PCT,
RuneObjectBase.STAT_DEF,
RuneObjectBase.STAT_DEF_PCT,
RuneObjectBase.STAT_HP,
RuneObjectBase.STAT_HP_PCT,
RuneObjectBase.STAT_CRIT_RATE_PCT,
RuneObjectBase.STAT_CRIT_DMG_PCT,
],
5: [
RuneObjectBase.STAT_HP,
],
6: [
RuneObjectBase.STAT_ATK,
RuneObjectBase.STAT_ATK_PCT,
RuneObjectBase.STAT_DEF,
RuneObjectBase.STAT_DEF_PCT,
RuneObjectBase.STAT_HP,
RuneObjectBase.STAT_HP_PCT,
RuneObjectBase.STAT_RESIST_PCT,
RuneObjectBase.STAT_ACCURACY_PCT,
]
}
SUBSTAT_INCREMENTS = {
# [stat][stars]: value
RuneObjectBase.STAT_HP: {
1: 60,
2: 105,
3: 165,
4: 225,
5: 300,
6: 375,
},
RuneObjectBase.STAT_HP_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
RuneObjectBase.STAT_ATK: {
1: 4,
2: 5,
3: 8,
4: 10,
5: 15,
6: 20,
},
RuneObjectBase.STAT_ATK_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
RuneObjectBase.STAT_DEF: {
1: 4,
2: 5,
3: 8,
4: 10,
5: 15,
6: 20,
},
RuneObjectBase.STAT_DEF_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
RuneObjectBase.STAT_SPD: {
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
1: 2,
2: 3,
3: 4,
4: 5,
5: 6,
6: 7,
},
RuneObjectBase.STAT_RESIST_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
RuneObjectBase.STAT_ACCURACY_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
}
INNATE_STAT_TITLES = {
RuneObjectBase.STAT_HP: 'Strong',
RuneObjectBase.STAT_HP_PCT: 'Tenacious',
RuneObjectBase.STAT_ATK: 'Ferocious',
RuneObjectBase.STAT_ATK_PCT: 'Powerful',
RuneObjectBase.STAT_DEF: 'Sturdy',
RuneObjectBase.STAT_DEF_PCT: 'Durable',
RuneObjectBase.STAT_SPD: 'Quick',
RuneObjectBase.STAT_CRIT_RATE_PCT: 'Mortal',
RuneObjectBase.STAT_CRIT_DMG_PCT: 'Cruel',
RuneObjectBase.STAT_RESIST_PCT: 'Resistant',
RuneObjectBase.STAT_ACCURACY_PCT: 'Intricate',
}
RUNE_SET_COUNT_REQUIREMENTS = {
RuneObjectBase.TYPE_ENERGY: 2,
RuneObjectBase.TYPE_FATAL: 4,
RuneObjectBase.TYPE_BLADE: 2,
RuneObjectBase.TYPE_RAGE: 4,
RuneObjectBase.TYPE_SWIFT: 4,
RuneObjectBase.TYPE_FOCUS: 2,
RuneObjectBase.TYPE_GUARD: 2,
RuneObjectBase.TYPE_ENDURE: 2,
RuneObjectBase.TYPE_VIOLENT: 4,
RuneObjectBase.TYPE_WILL: 2,
RuneObjectBase.TYPE_NEMESIS: 2,
RuneObjectBase.TYPE_SHIELD: 2,
RuneObjectBase.TYPE_REVENGE: 2,
RuneObjectBase.TYPE_DESPAIR: 4,
RuneObjectBase.TYPE_VAMPIRE: 4,
RuneObjectBase.TYPE_DESTROY: 2,
RuneObjectBase.TYPE_FIGHT: 2,
RuneObjectBase.TYPE_DETERMINATION: 2,
RuneObjectBase.TYPE_ENHANCE: 2,
RuneObjectBase.TYPE_ACCURACY: 2,
RuneObjectBase.TYPE_TOLERANCE: 2,
}
RUNE_SET_BONUSES = {
RuneObjectBase.TYPE_ENERGY: {
'count': 2,
'stat': RuneObjectBase.STAT_HP_PCT,
'value': 15.0,
'team': False,
'description': '2 Set: HP +15%',
},
RuneObjectBase.TYPE_FATAL: {
'count': 4,
'stat': RuneObjectBase.STAT_ATK_PCT,
'value': 35.0,
'team': False,
'description': '4 Set: Attack Power +35%',
},
RuneObjectBase.TYPE_BLADE: {
'count': 2,
'stat': RuneObjectBase.STAT_CRIT_RATE_PCT,
'value': 12.0,
'team': False,
'description': '2 Set: Critical Rate +12%',
},
RuneObjectBase.TYPE_RAGE: {
'count': 4,
'stat': RuneObjectBase.STAT_CRIT_DMG_PCT,
'value': 40.0,
'team': False,
'description': '4 Set: Critical Damage +40%',
},
RuneObjectBase.TYPE_SWIFT: {
'count': 4,
'stat': RuneObjectBase.STAT_SPD,
'value': 25.0,
'team': False,
'description': '4 Set: Attack Speed +25%',
},
RuneObjectBase.TYPE_FOCUS: {
'count': 2,
'stat': RuneObjectBase.STAT_ACCURACY_PCT,
'value': 20.0,
'team': False,
'description': '2 Set: Accuracy +20%',
},
RuneObjectBase.TYPE_GUARD: {
'count': 2,
'stat': RuneObjectBase.STAT_DEF_PCT,
'value': 15.0,
'team': False,
'description': '2 Set: Defense +15%',
},
RuneObjectBase.TYPE_ENDURE: {
'count': 2,
'stat': RuneObjectBase.STAT_RESIST_PCT,
'value': 20.0,
'team': False,
'description': '2 Set: Resistance +20%',
},
RuneObjectBase.TYPE_VIOLENT: {
'count': 4,
'stat': None,
'value': None,
'team': False,
'description': '4 Set: Get Extra Turn +22%',
},
RuneObjectBase.TYPE_WILL: {
'count': 2,
'stat': None,
'value': None,
'team': False,
'description': '2 Set: Immunity +1 turn',
},
RuneObjectBase.TYPE_NEMESIS: {
'count': 2,
'stat': None,
'value': None,
'team': False,
'description': '2 Set: ATK Gauge +4% (for every 7% HP lost)',
},
RuneObjectBase.TYPE_SHIELD: {
'count': 2,
'stat': None,
'value': None,
'team': True,
'description': '2 Set: Ally Shield 3 turns (15% of HP)',
},
RuneObjectBase.TYPE_REVENGE: {
'count': 2,
'stat': None,
'value': None,
'team': False,
'description': '2 Set: Counterattack +15%',
},
RuneObjectBase.TYPE_DESPAIR: {
'count': 4,
'stat': None,
'value': None,
'team': False,
'description': '4 Set: Stun Rate +25%',
},
RuneObjectBase.TYPE_VAMPIRE: {
'count': 4,
'stat': None,
'value': None,
'team': False,
'description': '4 Set: Life Drain +35%',
},
RuneObjectBase.TYPE_DESTROY: {
'count': 2,
'stat': None,
'value': None,
'team': False,
'description': "2 Set: 30% of the damage dealt will reduce up to 4% of the enemy's Max HP",
},
RuneObjectBase.TYPE_FIGHT: {
'count': 2,
'stat': RuneObjectBase.STAT_ATK,
'value': 7.0,
'team': True,
'description': '2 Set: Increase the Attack Power of all allies by 7%',
},
RuneObjectBase.TYPE_DETERMINATION: {
'count': 2,
'stat': RuneObjectBase.STAT_DEF,
'value': 7.0,
'team': True,
'description': '2 Set: Increase the Defense of all allies by 7%',
},
RuneObjectBase.TYPE_ENHANCE: {
'count': 2,
'stat': RuneObjectBase.STAT_HP,
'value': 7.0,
'team': True,
'description': '2 Set: Increase the HP of all allies by 7%',
},
RuneObjectBase.TYPE_ACCURACY: {
'count': 2,
'stat': RuneObjectBase.STAT_ACCURACY_PCT,
'value': 10.0,
'team': True,
'description': '2 Set: Increase the Accuracy of all allies by 10%',
},
RuneObjectBase.TYPE_TOLERANCE: {
'count': 2,
'stat': RuneObjectBase.STAT_RESIST_PCT,
'value': 10.0,
'team': True,
'description': '2 Set: Increase the Resistance of all allies by 10%',
},
}
type = models.IntegerField(choices=RuneObjectBase.TYPE_CHOICES)
stars = models.IntegerField()
level = models.IntegerField()
slot = models.IntegerField()
quality = models.IntegerField(default=0, choices=RuneObjectBase.QUALITY_CHOICES)
original_quality = models.IntegerField(choices=RuneObjectBase.QUALITY_CHOICES, blank=True, null=True)
value = models.IntegerField(blank=True, null=True)
main_stat = models.IntegerField(choices=RuneObjectBase.STAT_CHOICES)
main_stat_value = models.IntegerField()
innate_stat = models.IntegerField(choices=RuneObjectBase.STAT_CHOICES, null=True, blank=True)
innate_stat_value = models.IntegerField(null=True, blank=True)
substats = ArrayField(
models.IntegerField(choices=RuneObjectBase.STAT_CHOICES, null=True, blank=True),
size=4,
default=list,
)
substat_values = ArrayField(
models.IntegerField(blank=True, null=True),
size=4,
default=list,
)
# The following fields exist purely to allow easier filtering and are updated on model save
has_hp = models.BooleanField(default=False)
has_atk = models.BooleanField(default=False)
has_def = models.BooleanField(default=False)
has_crit_rate = models.BooleanField(default=False)
has_crit_dmg = models.BooleanField(default=False)
has_speed = models.BooleanField(default=False)
has_resist = models.BooleanField(default=False)
has_accuracy = models.BooleanField(default=False)
efficiency = models.FloatField(blank=True, null=True)
max_efficiency = models.FloatField(blank=True, null=True)
substat_upgrades_remaining = models.IntegerField(blank=True, null=True)
class Meta:
abstract = True
def get_main_stat_rune_display(self):
return RuneObjectBase.STAT_DISPLAY.get(self.main_stat, '')
def get_innate_stat_rune_display(self):
return RuneObjectBase.STAT_DISPLAY.get(self.innate_stat, '')
def get_substat_rune_display(self, idx):
if len(self.substats) > idx:
return RuneObjectBase.STAT_DISPLAY.get(self.substats[idx], '')
else:
return ''
def get_stat(self, stat_type, sub_stats_only=False):
if self.main_stat == stat_type and not sub_stats_only:
return self.main_stat_value
elif self.innate_stat == stat_type and not sub_stats_only:
return self.innate_stat_value
else:
for idx, substat in enumerate(self.substats):
if substat == stat_type:
return self.substat_values[idx]
return 0
@property
def substat_upgrades_received(self):
return int(floor(min(self.level, 12) / 3) + 1)
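# Illustrative mapping (added for clarity): rune level +0-2 -> 1 roll received,
# +3-5 -> 2, +6-8 -> 3, +9-11 -> 4, and +12 or higher -> 5, since upgrades past
# +12 no longer add substat rolls.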
def get_efficiency(self):
# https://www.youtube.com/watch?v=SBWeptNNbYc
# All runes are compared against max stat values for perfect 6* runes.
# Main stat efficiency
running_sum = float(self.MAIN_STAT_VALUES[self.main_stat][self.stars][15]) / float(self.MAIN_STAT_VALUES[self.main_stat][6][15])
# Substat efficiencies
if self.innate_stat is not None:
running_sum += self.innate_stat_value / float(self.SUBSTAT_INCREMENTS[self.innate_stat][6] * 5)
for substat, value in zip(self.substats, self.substat_values):
running_sum += value / float(self.SUBSTAT_INCREMENTS[substat][6] * 5)
return running_sum / 2.8 * 100
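# Illustrative arithmetic (not part of the original source): on a 6-star rune the
# main stat term is 1.0, and each substat contributes value / (6-star increment * 5),
# so a rune whose four substats each rolled once to their full increment scores
# (1.0 + 4 * 0.2) / 2.8 * 100 ~= 64.3.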
def update_fields(self):
# Set filterable fields
rune_stat_types = [self.main_stat, self.innate_stat] + self.substats
self.has_hp = any([i for i in rune_stat_types if i in [self.STAT_HP, self.STAT_HP_PCT]])
self.has_atk = any([i for i in rune_stat_types if i in [self.STAT_ATK, self.STAT_ATK_PCT]])
self.has_def = any([i for i in rune_stat_types if i in [self.STAT_DEF, self.STAT_DEF_PCT]])
self.has_crit_rate = self.STAT_CRIT_RATE_PCT in rune_stat_types
self.has_crit_dmg = self.STAT_CRIT_DMG_PCT in rune_stat_types
self.has_speed = self.STAT_SPD in rune_stat_types
self.has_resist = self.STAT_RESIST_PCT in rune_stat_types
self.has_accuracy = self.STAT_ACCURACY_PCT in rune_stat_types
self.quality = len([substat for substat in self.substats if substat])
self.substat_upgrades_remaining = 5 - self.substat_upgrades_received
self.efficiency = self.get_efficiency()
self.max_efficiency = self.efficiency + max(ceil((12 - self.level) / 3.0), 0) * 0.2 / 2.8 * 100
# Cap stat values to appropriate value
# Very old runes can have different values, but never higher than the cap
if self.main_stat_value:
self.main_stat_value = min(self.MAIN_STAT_VALUES[self.main_stat][self.stars][15], self.main_stat_value)
else:
self.main_stat_value = self.MAIN_STAT_VALUES[self.main_stat][self.stars][self.level]
if self.innate_stat and self.innate_stat_value > self.SUBSTAT_INCREMENTS[self.innate_stat][self.stars]:
self.innate_stat_value = self.SUBSTAT_INCREMENTS[self.innate_stat][self.stars]
for idx, substat in enumerate(self.substats):
max_sub_value = self.SUBSTAT_INCREMENTS[substat][self.stars] * self.substat_upgrades_received
if self.substat_values[idx] > max_sub_value:
self.substat_values[idx] = max_sub_value
def clean(self):
# Check slot, level, etc for valid ranges
if self.level is None or self.level < 0 or self.level > 15:
raise ValidationError({
'level': ValidationError(
'Level must be 0 through 15.',
code='invalid_rune_level',
)
})
if self.stars is None or (self.stars < 1 or self.stars > 6):
raise ValidationError({
'stars': ValidationError(
'Stars must be between 1 and 6.',
code='invalid_rune_stars',
)
})
if self.slot is not None:
if self.slot < 1 or self.slot > 6:
raise ValidationError({
'slot': ValidationError(
'Slot must be 1 through 6.',
code='invalid_rune_slot',
)
})
# Do slot vs stat check
if self.main_stat not in self.MAIN_STATS_BY_SLOT[self.slot]:
raise ValidationError({
'main_stat': ValidationError(
'Unacceptable stat for slot %(slot)s. Must be %(valid_stats)s.',
params={
'slot': self.slot,
'valid_stats': ', '.join([RuneObjectBase.STAT_CHOICES[stat - 1][1] for stat in self.MAIN_STATS_BY_SLOT[self.slot]])
},
code='invalid_rune_main_stat'
),
})
# Check that the same stat type was not used multiple times
stat_list = list(filter(
partial(is_not, None),
[self.main_stat, self.innate_stat] + self.substats
))
if len(stat_list) != len(set(stat_list)):
raise ValidationError(
'All stats and substats must be unique.',
code='duplicate_stats'
)
# Check if stat type was specified that it has value > 0
if self.main_stat_value is None:
raise ValidationError({
'main_stat_value': ValidationError(
'Missing main stat value.',
code='main_stat_missing_value',
)
})
max_main_stat_value = self.MAIN_STAT_VALUES[self.main_stat][self.stars][self.level]
if self.main_stat_value > max_main_stat_value:
raise ValidationError(
f'Main stat value for {self.get_main_stat_display()} at {self.stars}* lv. {self.level} must not exceed {max_main_stat_value}',
code='main_stat_value_invalid',
)
if self.innate_stat is not None:
if self.innate_stat_value is None or self.innate_stat_value <= 0:
raise ValidationError({
'innate_stat_value': ValidationError(
'Must be greater than 0.',
code='invalid_rune_innate_stat_value'
)
})
max_sub_value = self.SUBSTAT_INCREMENTS[self.innate_stat][self.stars]
if self.innate_stat_value > max_sub_value:
raise ValidationError({
'innate_stat_value': ValidationError(
'Must be less than or equal to ' + str(max_sub_value) + '.',
code='invalid_rune_innate_stat_value'
)
})
for idx, (substat, value) in enumerate(zip(self.substats, self.substat_values)):
if value is None or value <= 0:
raise ValidationError({
f'substat_values[{idx}]': ValidationError(
'Must be greater than 0.',
code='invalid_rune_substat_value'
)
})
max_sub_value = self.SUBSTAT_INCREMENTS[substat][self.stars] * self.substat_upgrades_received
if value > max_sub_value:
raise ValidationError({
f'substat_values[{idx}]': ValidationError(
'Must be less than or equal to ' + str(max_sub_value) + '.',
code='invalid_rune_substat_value'
)
})
class RuneCraft(RuneObjectBase):
CRAFT_GRINDSTONE = 0
CRAFT_ENCHANT_GEM = 1
CRAFT_IMMEMORIAL_GRINDSTONE = 2
CRAFT_IMMEMORIAL_GEM = 3
CRAFT_CHOICES = (
(CRAFT_GRINDSTONE, 'Grindstone'),
(CRAFT_ENCHANT_GEM, 'Enchant Gem'),
(CRAFT_IMMEMORIAL_GRINDSTONE, 'Immemorial Grindstone'),
(CRAFT_IMMEMORIAL_GEM, 'Immemorial Gem'),
)
CRAFT_ENCHANT_GEMS = [
CRAFT_ENCHANT_GEM,
CRAFT_IMMEMORIAL_GEM,
]
CRAFT_GRINDSTONES = [
CRAFT_GRINDSTONE,
CRAFT_IMMEMORIAL_GRINDSTONE,
]
# Type > Stat > Quality > Min/Max
CRAFT_VALUE_RANGES = {
CRAFT_GRINDSTONE: {
RuneObjectBase.STAT_HP: {
RuneObjectBase.QUALITY_NORMAL: {'min': 80, 'max': 120},
RuneObjectBase.QUALITY_MAGIC: {'min': 100, 'max': 200},
RuneObjectBase.QUALITY_RARE: {'min': 180, 'max': 250},
RuneObjectBase.QUALITY_HERO: {'min': 230, 'max': 450},
RuneObjectBase.QUALITY_LEGEND: {'min': 430, 'max': 550},
},
RuneObjectBase.STAT_HP_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 10},
},
RuneObjectBase.STAT_ATK: {
RuneObjectBase.QUALITY_NORMAL: {'min': 4, 'max': 8},
RuneObjectBase.QUALITY_MAGIC: {'min': 6, 'max': 12},
RuneObjectBase.QUALITY_RARE: {'min': 10, 'max': 18},
RuneObjectBase.QUALITY_HERO: {'min': 12, 'max': 22},
RuneObjectBase.QUALITY_LEGEND: {'min': 18, 'max': 30},
},
RuneObjectBase.STAT_ATK_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 10},
},
RuneObjectBase.STAT_DEF: {
RuneObjectBase.QUALITY_NORMAL: {'min': 4, 'max': 8},
RuneObjectBase.QUALITY_MAGIC: {'min': 6, 'max': 12},
RuneObjectBase.QUALITY_RARE: {'min': 10, 'max': 18},
RuneObjectBase.QUALITY_HERO: {'min': 12, 'max': 22},
RuneObjectBase.QUALITY_LEGEND: {'min': 18, 'max': 30},
},
RuneObjectBase.STAT_DEF_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 10},
},
RuneObjectBase.STAT_SPD: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_MAGIC: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 3},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 4},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 5},
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_MAGIC: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 6},
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 7},
},
RuneObjectBase.STAT_RESIST_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 8},
},
RuneObjectBase.STAT_ACCURACY_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 8},
},
},
CRAFT_ENCHANT_GEM: {
RuneObjectBase.STAT_HP: {
RuneObjectBase.QUALITY_NORMAL: {'min': 100, 'max': 150},
RuneObjectBase.QUALITY_MAGIC: {'min': 130, 'max': 220},
RuneObjectBase.QUALITY_RARE: {'min': 200, 'max': 310},
RuneObjectBase.QUALITY_HERO: {'min': 290, 'max': 420},
RuneObjectBase.QUALITY_LEGEND: {'min': 400, 'max': 580},
},
RuneObjectBase.STAT_HP_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 13},
},
RuneObjectBase.STAT_ATK: {
RuneObjectBase.QUALITY_NORMAL: {'min': 8, 'max': 12},
RuneObjectBase.QUALITY_MAGIC: {'min': 10, 'max': 16},
RuneObjectBase.QUALITY_RARE: {'min': 15, 'max': 23},
RuneObjectBase.QUALITY_HERO: {'min': 20, 'max': 30},
RuneObjectBase.QUALITY_LEGEND: {'min': 28, 'max': 40},
},
RuneObjectBase.STAT_ATK_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 13},
},
RuneObjectBase.STAT_DEF: {
RuneObjectBase.QUALITY_NORMAL: {'min': 8, 'max': 12},
RuneObjectBase.QUALITY_MAGIC: {'min': 10, 'max': 16},
RuneObjectBase.QUALITY_RARE: {'min': 15, 'max': 23},
RuneObjectBase.QUALITY_HERO: {'min': 20, 'max': 30},
RuneObjectBase.QUALITY_LEGEND: {'min': 28, 'max': 40},
},
RuneObjectBase.STAT_DEF_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 13},
},
RuneObjectBase.STAT_SPD: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_LEGEND: {'min': 7, 'max': 10},
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 6, 'max': 9},
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 4, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_LEGEND: {'min': 7, 'max': 10},
},
RuneObjectBase.STAT_RESIST_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 6, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 8, 'max': 11},
},
RuneObjectBase.STAT_ACCURACY_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 6, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 8, 'max': 11},
},
}
}
CRAFT_VALUE_RANGES[CRAFT_IMMEMORIAL_GEM] = CRAFT_VALUE_RANGES[CRAFT_ENCHANT_GEM]
CRAFT_VALUE_RANGES[CRAFT_IMMEMORIAL_GRINDSTONE] = CRAFT_VALUE_RANGES[CRAFT_GRINDSTONE]
class Dungeon(models.Model):
CATEGORY_SCENARIO = 0
CATEGORY_RUNE_DUNGEON = 1
CATEGORY_ESSENCE_DUNGEON = 2
CATEGORY_OTHER_DUNGEON = 3
CATEGORY_RAID = 4
CATEGORY_HALL_OF_HEROES = 5
CATEGORY_CHOICES = [
(CATEGORY_SCENARIO, 'Scenarios'),
(CATEGORY_RUNE_DUNGEON, 'Rune Dungeons'),
(CATEGORY_ESSENCE_DUNGEON, 'Elemental Dungeons'),
(CATEGORY_OTHER_DUNGEON, 'Other Dungeons'),
(CATEGORY_RAID, 'Raids'),
(CATEGORY_HALL_OF_HEROES, 'Hall of Heroes'),
]
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=100)
max_floors = models.IntegerField(default=10)
slug = models.SlugField(blank=True, null=True)
category = models.IntegerField(choices=CATEGORY_CHOICES, blank=True, null=True)
# TODO: Remove following fields when Level model is fully utilized everywhere: energy_cost, xp, monster_slots
# For the following fields:
# Outer array index is difficulty (normal, hard, hell). Inner array index is the stage/floor
# Example: Hell B2 is dungeon.energy_cost[RunLog.DIFFICULTY_HELL][1]
energy_cost = ArrayField(ArrayField(models.IntegerField(blank=True, null=True)), blank=True, null=True)
xp = ArrayField(ArrayField(models.IntegerField(blank=True, null=True)), blank=True, null=True)
monster_slots = ArrayField(ArrayField(models.IntegerField(blank=True, null=True)), blank=True, null=True)
class Meta:
ordering = ['id', ]
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Dungeon, self).save(*args, **kwargs)
class Level(models.Model):
DIFFICULTY_NORMAL = 1
DIFFICULTY_HARD = 2
DIFFICULTY_HELL = 3
DIFFICULTY_CHOICES = (
(DIFFICULTY_NORMAL, 'Normal'),
(DIFFICULTY_HARD, 'Hard'),
(DIFFICULTY_HELL, 'Hell'),
)
dungeon = models.ForeignKey(Dungeon, on_delete=models.CASCADE)
floor = models.IntegerField()
difficulty = models.IntegerField(choices=DIFFICULTY_CHOICES, blank=True, null=True)
energy_cost = models.IntegerField(blank=True, null=True, help_text='Energy cost to start a run')
xp = models.IntegerField(blank=True, null=True, help_text='XP gained by fully clearing the level')
frontline_slots = models.IntegerField(
default=5,
help_text='Serves as general slots if dungeon does not have front/back lines'
)
backline_slots = models.IntegerField(blank=True, null=True, help_text='Leave null for normal dungeons')
max_slots = models.IntegerField(
blank=True,
null=True,
help_text='Maximum monsters combined front/backline. Not required if backline not specified.'
)
class Meta:
ordering = ('difficulty', 'floor')
unique_together = ('dungeon', 'floor', 'difficulty')
def __str__(self):
return f'{self.dungeon_id} {self.floor} - {self.get_difficulty_display()}'
class GuideBase(models.Model):
short_text = models.TextField(blank=True, default='')
long_text = models.TextField(blank=True, default='')
last_updated = models.DateTimeField(auto_now=True)
edited_by = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL, editable=False)
class Meta:
abstract = True
class MonsterGuide(GuideBase):
monster = models.OneToOneField(Monster, on_delete=models.CASCADE)
def __str__(self):
return f'Monster Guide - {self.monster}'
class Meta:
ordering = ['monster__name']
| apache-2.0 | -5,660,210,679,202,290,000 | 39.546917 | 188 | 0.562391 | false |
akrause2014/dispel4py | dispel4py/new/mpi_process_test.py | 1 | 1115 | # Copyright (c) The University of Edinburgh 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dispel4py.new.mpi_process import process
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.examples.graph_testing.testing_PEs import TestProducer, TestOneInOneOut
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
prod = TestProducer()
cons1 = TestOneInOneOut()
cons2 = TestOneInOneOut()
graph = WorkflowGraph()
graph.connect(prod, 'output', cons1, 'input')
graph.connect(cons1, 'output', cons2, 'input')
process(graph, { prod : [ {}, {}, {} ] } )
| apache-2.0 | 5,902,654,700,553,839,000 | 32.787879 | 86 | 0.747982 | false |
dbdd4us/compose | compose/utils.py | 1 | 3235 | from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import hashlib
import json
import json.decoder
import logging
import ntpath
import six
from .errors import StreamParseError
json_decoder = json.JSONDecoder()
log = logging.getLogger(__name__)
def get_output_stream(stream):
if six.PY3:
return stream
return codecs.getwriter('utf-8')(stream)
def stream_as_text(stream):
"""Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once docker-py returns text streams instead
of byte streams.
"""
for data in stream:
if not isinstance(data, six.text_type):
data = data.decode('utf-8', 'replace')
yield data
def line_splitter(buffer, separator=u'\n'):
index = buffer.find(six.text_type(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
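# Example (illustrative): line_splitter(u'a\nb') -> (u'a\n', u'b'), keeping the
# trailing separator with the emitted chunk, while line_splitter(u'ab') -> None
# because no separator has arrived yet.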
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = six.text_type('')
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
log.error(
'Compose tried decoding the following data chunk, but failed:'
'\n%s' % repr(buffered)
)
raise StreamParseError(e)
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
return obj, rest
except ValueError:
return None
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def json_hash(obj):
dump = json.dumps(obj, sort_keys=True, separators=(',', ':'))
h = hashlib.sha256()
h.update(dump.encode('utf8'))
return h.hexdigest()
def microseconds_from_time_nano(time_nano):
return int(time_nano % 1000000000 / 1000)
def build_string_dict(source_dict):
return dict((k, str(v if v is not None else '')) for k, v in source_dict.items())
def splitdrive(path):
if len(path) == 0:
return ('', '')
if path[0] in ['.', '\\', '/', '~']:
return ('', path)
return ntpath.splitdrive(path)
| apache-2.0 | -2,486,481,074,851,938,300 | 26.184874 | 85 | 0.636785 | false |
javierwilson/forocacao | forocacao/app/png.py | 1 | 4177 | # -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont
import textwrap
from unidecode import unidecode
from reportlab.graphics import renderPM
from reportlab.graphics.barcode import code128
from reportlab.graphics.barcode import createBarcodeDrawing
from reportlab.graphics.barcode import createBarcodeImageInMemory
from reportlab.graphics.shapes import Drawing
from django.conf import settings
def get_barcode(value, width, humanReadable = True):
#value = value.encode('ascii', 'ignore')
value = unidecode(value)
barcode = createBarcodeDrawing('Code128', value = value, humanReadable = humanReadable, fontSize = 8)
drawing_width = width
barcode_scale = drawing_width / barcode.width
drawing_height = barcode.height * barcode_scale
drawing = Drawing(drawing_width, drawing_height)
drawing.scale(barcode_scale, barcode_scale)
drawing.add(barcode, name='barcode')
return drawing
def createPNG(participant, where):
event = participant.event
badge_size_x = event.badge_size_x or 390
badge_size_y = event.badge_size_y or 260
badge_color = event.badge_color or "#FFFFFF"
image_file = settings.MEDIA_ROOT + '/gafete390x260.png'
img = Image.open(image_file)
#img = Image.new('RGBA', (badge_size_x, badge_size_y), badge_color)
draw = ImageDraw.Draw(img)
draw.rectangle(((0,0),(badge_size_x-1, badge_size_y-1)), outline = "black")
if (len(participant.last_name) + len(participant.first_name) > 20):
last_name = participant.last_name.partition(' ')[0] if len(participant.last_name) > 12 else participant.last_name
first_name = participant.first_name.partition(' ')[0] if len(participant.first_name) >= 12 else participant.first_name
else:
last_name = participant.last_name
first_name = participant.first_name
match = {
'event': event.name,
#'name': "%s %s" % (participant.first_name, participant.last_name ),
#'name': "%s %s" % (participant.first_name.partition(' ')[0], participant.last_name.partition(' ')[0]),
'name': "%s %s" % (first_name, last_name),
'first_name': participant.first_name,
'last_name': participant.last_name,
'profession': participant.profession,
'organization': participant.organization,
'country': participant.country.name,
'type': participant.type,
'email': participant.email,
}
for field in event.eventbadge_set.all():
x = field.x
y = field.y
size = field.size
if field.field == 'logo':
if participant.event.logo:
logo = Image.open(participant.event.logo.file.file)
logo.thumbnail((size,size))
img.paste(logo, (x,y))
elif field.field == 'photo':
if participant.photo:
photo = Image.open(participant.photo)
photo.thumbnail((size,size))
img.paste(photo, (x,y))
else:
if field.field == 'text':
content = field.format
else:
content = match[field.field]
fnt = ImageFont.truetype(field.font.filename, size)
color = field.color
text = ("%s") % (content)
textsize = draw.textsize(text, font=fnt)
if textsize[0]+x < badge_size_x:
draw.text((x,y), ("%s") % (content), font=fnt, fill=color)
else:
# calculate maximum size in characters
max_chars = (badge_size_x-(x*2)) * len(text) / textsize[0]
lines = textwrap.fill(text, max_chars).splitlines()
tmp = y
for line in lines:
draw.text((x,y), line, font=fnt, fill=color)
y += size
y = tmp
# FIXME: NO barcode
#short_full_name = "%s: %s" % (participant.id, participant.short_full_name())
#barcode = get_barcode(short_full_name, badge_size_x-4)
#barcode_image = renderPM.drawToPIL(barcode)
#img.paste(barcode_image, (0+2, badge_size_y-70))
img.save(where, "PNG")
| bsd-3-clause | -7,824,533,436,109,494,000 | 38.40566 | 126 | 0.603304 | false |
mhahn/stacker | stacker/lookups/registry.py | 1 | 1745 | from ..exceptions import UnknownLookupType
from ..util import load_object_from_string
from .handlers import output
from .handlers import kms
from .handlers import xref
from .handlers import file as file_handler
LOOKUP_HANDLERS = {}
DEFAULT_LOOKUP = output.TYPE_NAME
def register_lookup_handler(lookup_type, handler_or_path):
"""Register a lookup handler.
Args:
lookup_type (str): Name to register the handler under
handler_or_path (OneOf[func, str]): a function or a path to a handler
"""
handler = handler_or_path
if isinstance(handler_or_path, basestring):
handler = load_object_from_string(handler_or_path)
LOOKUP_HANDLERS[lookup_type] = handler
def resolve_lookups(lookups, context, provider):
"""Resolve a set of lookups.
Args:
lookups (list of :class:`stacker.lookups.Lookup`): a list of stacker
lookups to resolve
context (:class:`stacker.context.Context`): stacker context
provider (:class:`stacker.provider.base.BaseProvider`): subclass of the
base provider
Returns:
dict: dict of Lookup -> resolved value
"""
resolved_lookups = {}
for lookup in lookups:
try:
handler = LOOKUP_HANDLERS[lookup.type]
except KeyError:
raise UnknownLookupType(lookup)
resolved_lookups[lookup] = handler(
value=lookup.input,
context=context,
provider=provider,
)
return resolved_lookups
register_lookup_handler(output.TYPE_NAME, output.handler)
register_lookup_handler(kms.TYPE_NAME, kms.handler)
register_lookup_handler(xref.TYPE_NAME, xref.handler)
register_lookup_handler(file_handler.TYPE_NAME, file_handler.handler)
| bsd-2-clause | -1,027,917,854,892,513,400 | 29.614035 | 79 | 0.679083 | false |
SKIRT/PTS | do/core/remove_columns.py | 1 | 2196 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.core.remove_columns Remove column(s) from a table.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from pts.core.basics.table import SmartTable
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.core.tools import filesystem as fs
from pts.core.tools import sequences
# -----------------------------------------------------------------
# Create configuration
definition = ConfigurationDefinition(write_config=False)
#definition.add_required("filename", "file_path", "table file")
definition.add_required("columns", "string_list", "names of columns")
definition.add_optional("method", "string", "table reading method", "lines")
config = parse_arguments("remove_columns", definition)
# -----------------------------------------------------------------
# Find table files
filepaths = fs.files_in_cwd(extension="dat", recursive=True)
for filepath in filepaths:
#print(filepath)
directory_name = fs.name(fs.directory_of(filepath))
filename = fs.name(filepath)
# Get column names
column_names = fs.get_column_names(filepath)
if not sequences.contains_any(column_names, config.columns): continue
print(directory_name, filename, column_names)
# -----------------------------------------------------------------
# Load the table
table = SmartTable.from_file(filepath, method=config.method)
# -----------------------------------------------------------------
# Remove the columns
table.remove_columns(config.columns)
# -----------------------------------------------------------------
# Save the table
table.save()
# -----------------------------------------------------------------
| agpl-3.0 | -2,754,714,214,914,406,400 | 34.403226 | 82 | 0.51754 | false |
charlesfleche/charlesfleche.net | fabfile.py | 1 | 3663 | from fabric.api import *
import fabric.contrib.project as project
import http.server
import os
import shutil
import sys
import socketserver
# Local path configuration (can be absolute or relative to fabfile)
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path
# Remote server configuration
production = '[email protected]'
dest_path = '/var/www/charlesfleche.net'
nginx_site_path = '/etc/nginx/sites-available/charlesfleche.net'
icons_root = 'themes/charlesfleche/static'
css_root = 'themes/charlesfleche/static/css'
# Rackspace Cloud Files configuration settings
env.cloudfiles_username = 'my_rackspace_username'
env.cloudfiles_api_key = 'my_rackspace_api_key'
env.cloudfiles_container = 'my_cloudfiles_container'
# Github Pages configuration
env.github_pages_branch = "gh-pages"
# Port for `serve`
PORT = 8000
def goaccess():
"""Create goaccess realtime web report"""
local('''ssh [email protected] 'tail -n +1 -f /var/log/nginx/blog.access.log' | goaccess -o /tmp/report.html --log-format=COMBINED --real-time-html --geoip-database GeoLite2-Country.mmdb -a -'''.format(production))
def clean():
"""Remove generated files"""
if os.path.isdir(DEPLOY_PATH):
shutil.rmtree(DEPLOY_PATH)
os.makedirs(DEPLOY_PATH)
def build():
"""Build local version of site"""
local('pelican -s pelicanconf.py')
def build_icons():
"""Build icons"""
local('inkscape -z -e /tmp/favicon.png -w 64 -h 64 logo.svg')
local('cp logo.svg {}'.format(icons_root))
local('convert /tmp/favicon.png {}/favicon.ico'.format(icons_root))
local('inkscape -z -e {}/icon.png -w 192 -h 192 logo.svg'.format(icons_root))
local('inkscape -z -e {}/tile.png -w 558 -h 558 logo.svg'.format(icons_root))
local('inkscape -z -e {}/tile-wide.png -w 558 -h 270 --export-area=-5:0:15:10 logo.svg'.format(icons_root))
def copy_fonts():
'''Copy icomoon fonts to theme folder'''
local('cp icomoon/style.css {}/fonts.css'.format(css_root))
local('cp -r icomoon/fonts {}'.format(css_root))
def rebuild():
"""`build` with the delete switch"""
local('pelican -d -s pelicanconf.py')
def regenerate():
"""Automatically regenerate site upon file modification"""
local('pelican -r -s pelicanconf.py')
def serve():
"""Serve site at http://localhost:8000/"""
os.chdir(env.deploy_path)
with http.server.HTTPServer(("", PORT), http.server.SimpleHTTPRequestHandler) as httpd:
print("Serving at port", PORT)
httpd.serve_forever()
def reserve():
"""`build`, then `serve`"""
build()
serve()
def preview():
"""Build production version of site"""
local('pelican -s publishconf.py')
def cf_upload():
"""Publish to Rackspace Cloud Files"""
rebuild()
with lcd(DEPLOY_PATH):
local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 '
'-U {cloudfiles_username} '
'-K {cloudfiles_api_key} '
'upload -c {cloudfiles_container} .'.format(**env))
@hosts(production)
def publish():
"""Publish to production via rsync"""
local('pelican -s publishconf.py')
project.rsync_project(
remote_dir=dest_path,
exclude=['.DS_Store', 'Articles', '.webassets-cache'],
local_dir=DEPLOY_PATH.rstrip('/') + '/',
delete=True,
extra_opts='-c',
)
@hosts(production)
def publish_nginx():
put('nginx.site', nginx_site_path, use_sudo=True)
@hosts(production)
def reload_nginx():
sudo('sudo systemctl reload nginx')
def gh_pages():
"""Publish to GitHub Pages"""
rebuild()
local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env))
| mit | -9,078,609,407,775,656,000 | 30.577586 | 223 | 0.663118 | false |
cylc/cylc | cylc/flow/broadcast_report.py | 1 | 3589 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Provide a function to report modification to broadcast settings."""
BAD_OPTIONS_FMT = "\n --%s=%s"
BAD_OPTIONS_TITLE = "No broadcast to cancel/clear for these options:"
BAD_OPTIONS_TITLE_SET = ("Rejected broadcast: settings are not"
" compatible with the suite")
CHANGE_FMT = "\n%(change)s [%(namespace)s.%(point)s] %(key)s=%(value)s"
CHANGE_PREFIX_CANCEL = "-"
CHANGE_PREFIX_SET = "+"
CHANGE_TITLE_CANCEL = "Broadcast cancelled:"
CHANGE_TITLE_SET = "Broadcast set:"
def get_broadcast_bad_options_report(bad_options, is_set=False):
"""Return a string to report bad options for broadcast cancel/clear."""
if not bad_options:
return None
if is_set:
msg = BAD_OPTIONS_TITLE_SET
else:
msg = BAD_OPTIONS_TITLE
for key, values in sorted(bad_options.items()):
for value in values:
if isinstance(value, tuple) or isinstance(value, list):
value_str = ""
values = list(value)
while values:
val = values.pop(0)
if values:
value_str += "[" + val + "]"
else:
value_str += val
else:
value_str = value
msg += BAD_OPTIONS_FMT % (key, value_str)
return msg
def get_broadcast_change_iter(modified_settings, is_cancel=False):
"""Return an iterator of broadcast changes.
Each broadcast change is a dict with keys:
change, point, namespace, key, value
"""
if not modified_settings:
return
if is_cancel:
change = CHANGE_PREFIX_CANCEL
else:
change = CHANGE_PREFIX_SET
for modified_setting in sorted(modified_settings,
key=lambda x: (x[0], x[1])):
# sorted by (point, namespace)
point, namespace, setting = modified_setting
value = setting
keys_str = ""
while isinstance(value, dict):
key, value = list(value.items())[0]
if isinstance(value, dict):
keys_str += "[" + key + "]"
else:
keys_str += key
yield {
"change": change,
"point": point,
"namespace": namespace,
"key": keys_str,
"value": str(value)}
def get_broadcast_change_report(modified_settings, is_cancel=False):
"""Return a string for reporting modification to broadcast settings."""
if not modified_settings:
return ""
if is_cancel:
msg = CHANGE_TITLE_CANCEL
else:
msg = CHANGE_TITLE_SET
for broadcast_change in get_broadcast_change_iter(
modified_settings, is_cancel):
msg += CHANGE_FMT % broadcast_change
return msg
| gpl-3.0 | -5,435,023,536,567,714,000 | 35.252525 | 75 | 0.593201 | false |
dceoy/fractus | fract/model/ewma.py | 1 | 1996 | #!/usr/bin/env python
import logging
import numpy as np
from .sieve import LRFeatureSieve
class Ewma(object):
def __init__(self, config_dict):
self.__logger = logging.getLogger(__name__)
self.__alpha = config_dict['model']['ewma']['alpha']
self.__sigma_band = config_dict['model']['ewma']['sigma_band']
self.__lrfs = LRFeatureSieve(
type=config_dict['feature']['type'],
weight_decay=config_dict['model']['ewma']['alpha']
)
def detect_signal(self, history_dict, pos=None):
best_f = self.__lrfs.extract_best_feature(history_dict=history_dict)
sig_dict = self._ewm_stats(series=best_f['series'])
if sig_dict['ewmbb'][0] > 0:
sig_act = 'buy'
elif sig_dict['ewmbb'][1] < 0:
sig_act = 'sell'
elif pos and (
(pos['side'] == 'buy' and sig_dict['ewma'] < 0) or
(pos['side'] == 'sell' and sig_dict['ewma'] > 0)):
sig_act = 'close'
else:
sig_act = None
sig_log_str = '{:^40}|'.format(
'{0:>3}[{1:>3}]:{2:>9}{3:>18}'.format(
self.__lrfs.code, best_f['granularity_str'],
'{:.1g}'.format(sig_dict['ewma']),
np.array2string(
sig_dict['ewmbb'],
formatter={'float_kind': lambda f: '{:.1g}'.format(f)}
)
)
)
return {
'sig_act': sig_act, 'sig_log_str': sig_log_str,
'sig_ewma': sig_dict['ewma'], 'sig_ewmbbl': sig_dict['ewmbb'][0],
'sig_ewmbbu': sig_dict['ewmbb'][1]
}
def _ewm_stats(self, series):
ewm = series.ewm(alpha=self.__alpha)
ewma = ewm.mean().iloc[-1]
self.__logger.debug('ewma: {}'.format(ewma))
ewm_bollinger_band = (
np.array([-1, 1]) * ewm.std().iloc[-1] * self.__sigma_band
) + ewma
return {'ewma': ewma, 'ewmbb': ewm_bollinger_band}
| mit | -7,870,564,357,281,349,000 | 35.962963 | 77 | 0.491483 | false |
tmm/django-username-email | cuser/admin.py | 1 | 1437 | from django.contrib import admin
from django.contrib.auth.admin import GroupAdmin as BaseGroupAdmin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group as StockGroup
from django.utils.translation import gettext_lazy as _
from cuser.forms import UserChangeForm, UserCreationForm
from cuser.models import CUser, Group
from cuser.settings import CUSER_SETTINGS
@admin.register(CUser)
class UserAdmin(BaseUserAdmin):
add_form_template = 'admin/cuser/cuser/add_form.html'
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
form = UserChangeForm
add_form = UserCreationForm
list_display = ('email', 'first_name', 'last_name', 'is_staff')
search_fields = ('email', 'first_name', 'last_name')
ordering = ('email',)
if CUSER_SETTINGS['register_proxy_auth_group_model']:
admin.site.unregister(StockGroup)
@admin.register(Group)
class GroupAdmin(BaseGroupAdmin):
pass
| mit | 9,063,590,292,572,750,000 | 34.925 | 79 | 0.637439 | false |
anthonyfok/frescobaldi | frescobaldi_app/docbrowser/browser.py | 1 | 10953 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The browser widget for the help browser.
"""
import os
from PyQt5.QtCore import QSettings, Qt, QUrl
from PyQt5.QtGui import QKeySequence
from PyQt5.QtPrintSupport import QPrintDialog, QPrinter
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import QWebPage, QWebView
from PyQt5.QtWidgets import QComboBox, QMenu, QToolBar, QVBoxLayout, QWidget
import app
import icons
import helpers
import widgets.lineedit
import lilypondinfo
import lilydoc.manager
import lilydoc.network
import textformats
class Browser(QWidget):
"""LilyPond documentation browser widget."""
def __init__(self, dockwidget):
super(Browser, self).__init__(dockwidget)
layout = QVBoxLayout(spacing=0)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.toolbar = tb = QToolBar()
self.webview = QWebView(contextMenuPolicy=Qt.CustomContextMenu)
self.chooser = QComboBox(sizeAdjustPolicy=QComboBox.AdjustToContents)
self.search = SearchEntry(maximumWidth=200)
layout.addWidget(self.toolbar)
layout.addWidget(self.webview)
ac = dockwidget.actionCollection
ac.help_back.triggered.connect(self.webview.back)
ac.help_forward.triggered.connect(self.webview.forward)
ac.help_home.triggered.connect(self.showHomePage)
ac.help_print.triggered.connect(self.slotPrint)
self.webview.page().setNetworkAccessManager(lilydoc.network.accessmanager())
self.webview.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
self.webview.page().linkClicked.connect(self.openUrl)
self.webview.page().setForwardUnsupportedContent(True)
self.webview.page().unsupportedContent.connect(self.slotUnsupported)
self.webview.urlChanged.connect(self.slotUrlChanged)
self.webview.customContextMenuRequested.connect(self.slotShowContextMenu)
tb.addAction(ac.help_back)
tb.addAction(ac.help_forward)
tb.addSeparator()
tb.addAction(ac.help_home)
tb.addAction(ac.help_print)
tb.addSeparator()
tb.addWidget(self.chooser)
tb.addWidget(self.search)
self.chooser.activated[int].connect(self.showHomePage)
self.search.textEdited.connect(self.slotSearchChanged)
self.search.returnPressed.connect(self.slotSearchReturnPressed)
dockwidget.mainwindow().iconSizeChanged.connect(self.updateToolBarSettings)
dockwidget.mainwindow().toolButtonStyleChanged.connect(self.updateToolBarSettings)
app.settingsChanged.connect(self.readSettings)
self.readSettings()
self.loadDocumentation()
self.showInitialPage()
app.settingsChanged.connect(self.loadDocumentation)
app.translateUI(self)
def readSettings(self):
s = QSettings()
s.beginGroup("documentation")
ws = self.webview.page().settings()
family = s.value("fontfamily", self.font().family(), str)
size = s.value("fontsize", 16, int)
ws.setFontFamily(QWebSettings.StandardFont, family)
ws.setFontSize(QWebSettings.DefaultFontSize, size)
fixed = textformats.formatData('editor').font
ws.setFontFamily(QWebSettings.FixedFont, fixed.family())
ws.setFontSize(QWebSettings.DefaultFixedFontSize, fixed.pointSizeF() * 96 / 72)
def keyPressEvent(self, ev):
if ev.text() == "/":
self.search.setFocus()
else:
super(Browser, self).keyPressEvent(ev)
def translateUI(self):
try:
self.search.setPlaceholderText(_("Search..."))
except AttributeError:
pass # not in Qt 4.6
def showInitialPage(self):
"""Shows the preferred start page.
If a local documentation instance already has a suitable version,
just loads it. Otherwise connects to the allLoaded signal, that is
emitted when all the documentation instances have loaded their version
information and then shows the start page (if another page wasn't yet
loaded).
"""
if self.webview.url().isEmpty():
docs = lilydoc.manager.docs()
version = lilypondinfo.preferred().version()
index = -1
if version:
for num, doc in enumerate(docs):
if doc.version() is not None and doc.version() >= version:
index = num # a suitable documentation is found
break
if index == -1:
# nothing found (or LilyPond version not available),
# wait for loading or show the most recent version
if not lilydoc.manager.loaded():
lilydoc.manager.allLoaded.connect(self.showInitialPage)
return
index = len(docs) - 1
self.chooser.setCurrentIndex(index)
self.showHomePage()
def loadDocumentation(self):
"""Puts the available documentation instances in the combobox."""
i = self.chooser.currentIndex()
self.chooser.clear()
for doc in lilydoc.manager.docs():
v = doc.versionString()
if doc.isLocal():
t = _("(local)")
else:
t = _("({hostname})").format(hostname=doc.url().host())
self.chooser.addItem("{0} {1}".format(v or _("<unknown>"), t))
self.chooser.setCurrentIndex(i)
if not lilydoc.manager.loaded():
lilydoc.manager.allLoaded.connect(self.loadDocumentation, -1)
return
def updateToolBarSettings(self):
mainwin = self.parentWidget().mainwindow()
self.toolbar.setIconSize(mainwin.iconSize())
self.toolbar.setToolButtonStyle(mainwin.toolButtonStyle())
def showManual(self):
"""Invoked when the user presses F1."""
self.slotHomeFrescobaldi() # TEMP
def slotUrlChanged(self):
ac = self.parentWidget().actionCollection
ac.help_back.setEnabled(self.webview.history().canGoBack())
ac.help_forward.setEnabled(self.webview.history().canGoForward())
def openUrl(self, url):
if url.path().endswith(('.ily', '.lyi', '.ly')):
self.sourceViewer().showReply(lilydoc.network.get(url))
else:
self.webview.load(url)
def slotUnsupported(self, reply):
helpers.openUrl(reply.url())
def slotSearchChanged(self):
text = self.search.text()
if not text.startswith(':'):
self.webview.page().findText(text, QWebPage.FindWrapsAroundDocument)
def slotSearchReturnPressed(self):
text = self.search.text()
if not text.startswith(':'):
self.slotSearchChanged()
else:
pass # TODO: implement full doc search
def sourceViewer(self):
try:
return self._sourceviewer
except AttributeError:
from . import sourceviewer
self._sourceviewer = sourceviewer.SourceViewer(self)
return self._sourceviewer
def showHomePage(self):
"""Shows the homepage of the LilyPond documentation."""
i = self.chooser.currentIndex()
if i < 0:
i = 0
doc = lilydoc.manager.docs()[i]
url = doc.home()
if doc.isLocal():
path = url.toLocalFile()
langs = lilydoc.network.langs()
if langs:
for lang in langs:
if os.path.exists(path + '.' + lang + '.html'):
path += '.' + lang
break
url = QUrl.fromLocalFile(path + '.html')
self.webview.load(url)
def slotPrint(self):
printer = QPrinter()
dlg = QPrintDialog(printer, self)
dlg.setWindowTitle(app.caption(_("Print")))
if dlg.exec_():
self.webview.print_(printer)
def slotShowContextMenu(self, pos):
hit = self.webview.page().currentFrame().hitTestContent(pos)
menu = QMenu()
if hit.linkUrl().isValid():
a = self.webview.pageAction(QWebPage.CopyLinkToClipboard)
a.setIcon(icons.get("edit-copy"))
a.setText(_("Copy &Link"))
menu.addAction(a)
menu.addSeparator()
a = menu.addAction(icons.get("window-new"), _("Open Link in &New Window"))
a.triggered.connect((lambda url: lambda: self.slotNewWindow(url))(hit.linkUrl()))
else:
if hit.isContentSelected():
a = self.webview.pageAction(QWebPage.Copy)
a.setIcon(icons.get("edit-copy"))
a.setText(_("&Copy"))
menu.addAction(a)
menu.addSeparator()
a = menu.addAction(icons.get("window-new"), _("Open Document in &New Window"))
a.triggered.connect((lambda url: lambda: self.slotNewWindow(url))(self.webview.url()))
if menu.actions():
menu.exec_(self.webview.mapToGlobal(pos))
def slotNewWindow(self, url):
helpers.openUrl(url)
class SearchEntry(widgets.lineedit.LineEdit):
"""A line edit that clears itself when ESC is pressed."""
def keyPressEvent(self, ev):
if ev.key() == Qt.Key_Escape:
if self.text():
self.clear()
else:
webview = self.parentWidget().parentWidget().webview
webview.setFocus()
webview.page().findText(None)
elif any(ev.matches(key) for key in (
QKeySequence.MoveToNextLine, QKeySequence.MoveToPreviousLine,
QKeySequence.MoveToNextPage, QKeySequence.MoveToPreviousPage,
)):
webview = self.parentWidget().parentWidget().webview
webview.keyPressEvent(ev)
else:
super(SearchEntry, self).keyPressEvent(ev)
| gpl-2.0 | 8,974,451,822,156,147,000 | 37.978648 | 98 | 0.619739 | false |
ftrimble/route-grower | pyroute/compress/compress.py | 1 | 4419 | #!/usr/bin/python
#----------------------------------------------------------------
#
#------------------------------------------------------
# Usage:
#
#------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------
import sys
import os
from xml.sax import make_parser, handler
import xml
from struct import *
class BinaryOsm(handler.ContentHandler):
def __init__(self):
pass
def encode(self, filename, output):
self.nextKID = 3
self.nextVID = 1
self.tags = {}
self.values = {}
if(not os.path.exists(filename)):
print "No such data file %s" % filename
return
try:
self.out = open(output, "wb")
parser = make_parser()
parser.setContentHandler(self)
parser.parse(filename)
self.out.write("X")
self.out.close()
except xml.sax._exceptions.SAXParseException:
print "Error loading %s" % filename
def startElement(self, name, attrs):
"""Handle XML elements"""
if(name =='node'):
self.meta = { \
'id':int(attrs.get('id')),
'lon':float(attrs.get('lat')),
'lat':float(attrs.get('lon'))
}
self.tags = {}
elif(name == 'way'):
self.meta = {'id':int(attrs.get('id'))}
self.tags = {}
self.waynodes = []
elif(name == 'relation'):
self.tags = {}
elif name == 'nd':
"""Nodes within a way -- add them to a list"""
self.waynodes.append(int(attrs.get('ref')))
elif name == 'tag':
"""Tags - store them in a hash"""
k,v = (attrs.get('k'), attrs.get('v'))
if not k in ('created_by'):
self.tags[k] = v
def endElement(self, name):
"""Handle ways in the OSM data"""
writeTags = False
if(name =='node'):
data = 'N' + pack("L", self.meta['id']) + self.encodeLL(self.meta['lat'], self.meta['lon'])
self.out.write(data)
writeTags = True
elif(name == 'way'):
data = 'W' + pack("L", self.meta['id'])
self.out.write(data)
self.out.write(pack('H', len(self.waynodes)))
for n in self.waynodes:
self.out.write(pack('L', n))
writeTags = True
if(writeTags):
n = len(self.tags.keys())
if(n > 255):
# TODO:
print "Error: more than 255 tags on an item"
return
self.out.write(pack('B', n))
for k,v in self.tags.items():
self.encodeTag(k, False, k)
volatile = k in ('name','ref','ncn_ref','note','notes','description','ele','time','url','website','postal_code','image','source_ref','source:ref','source:name','source_ref:name',"FIXME","fixme","place_numbers")
self.encodeTag(v,volatile,k)
def encodeTag(self,text,volatile,key):
text = text.encode('utf8')
if(not volatile):
try:
ID = self.values[text]
self.out.write(pack('H', ID))
except KeyError:
if(self.nextKID >= 65535):
# TODO
print "Error: too many stored tags!"
sys.exit()
print "%d: %s %s" % (self.nextKID, key,text)
self.values[text] = self.nextKID
self.out.write(pack('HHB', 1, self.nextKID, len(text)))
self.out.write(text)
self.nextKID = self.nextKID + 1
else:
self.out.write(pack('HB', 0, len(text)))
self.out.write(text)
#print "Storing simple %s" % (text)
def encodeLL(self,lat,lon):
pLat = (lat + 90.0) / 180.0
pLon = (lon + 180.0) / 360.0
iLat = self.encodeP(pLat)
iLon = self.encodeP(pLon)
return(pack("II", iLat, iLon))
def encodeP(self,p):
i = int(p * 4294967296.0)
return(i)
# Parse the supplied OSM file
if __name__ == "__main__":
print "Loading data..."
Binary = BinaryOsm()
Binary.encode(sys.argv[1], sys.argv[2])
| apache-2.0 | 2,230,958,692,004,118,500 | 30.564286 | 218 | 0.562118 | false |
tp81/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/show.py | 1 | 29181 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Generic functionality for handling particular links and "showing" objects
in the OMERO.web tree view.
"""
import omero
import re
from omero.rtypes import rint, rlong
from django.core.urlresolvers import reverse
from copy import deepcopy
class IncorrectMenuError(Exception):
"""Exception to signal that we are on the wrong menu."""
def __init__(self, uri):
"""
Constructs a new Exception instance.
@param uri URI to redirect to.
@type uri String
"""
super(Exception, self).__init__()
self.uri = uri
class Show(object):
"""
This object is used by most of the top-level pages. The "show" and
"path" query strings are used by this object to both direct OMERO.web to
the correct locations in the hierarchy and select the correct objects
in that hierarchy.
"""
# List of prefixes that are at the top level of the tree
TOP_LEVEL_PREFIXES = ('project', 'screen')
# List of supported object types
SUPPORTED_OBJECT_TYPES = (
'project', 'dataset', 'image', 'screen', 'plate', 'tag',
'acquisition', 'run', 'well'
)
# Regular expression which declares the format for a "path" used either
# in the "path" or "show" query string. No modifications should be made
# to this regex without corresponding unit tests in
# "tests/unit/test_show.py".
PATH_REGEX = re.compile(
r'(?P<object_type>\w+)\.?(?P<key>\w+)?[-=](?P<value>[^\|]*)\|?'
)
# Regular expression for matching Well names
WELL_REGEX = re.compile(
'^(?:(?P<alpha_row>[a-zA-Z]+)(?P<digit_column>\d+))|'
'(?:(?P<digit_row>\d+)(?P<alpha_column>[a-zA-Z]+))$'
)
def __init__(self, conn, request, menu):
"""
Constructs a Show instance. The instance will not be fully
initialised until the first retrieval of the L{Show.first_selected}
property.
@param conn OMERO gateway.
@type conn L{omero.gateway.BlitzGateway}
@param request Django HTTP request.
@type request L{django.http.HttpRequest}
@param menu Literal representing the current menu we are on.
@type menu String
"""
# The list of "paths" ("type-id") we have been requested to
# show/select in the user interface. May be modified if one or
# more of the elements is not in the tree. This is currently the
# case for all Screen-Plate-Well hierarchy elements below Plate
# (Well for example).
self._initially_select = list()
# The nodes of the tree that will be initially open based on the
# nodes that are initially selected.
self._initially_open = None
# The owner of the node closest to the root of the tree from the
# list of initially open nodes.
self._initially_open_owner = None
# First selected node from the requested initially open "paths"
# that is first loaded on first retrieval of the "first_selected"
# property.
self._first_selected = None
self.conn = conn
self.request = request
self.menu = menu
path = self.request.REQUEST.get('path', '').split('|')[-1]
self._add_if_supported(path)
show = self.request.REQUEST.get('show', '')
for path in show.split('|'):
self._add_if_supported(path)
def _add_if_supported(self, path):
"""Adds a path to the initially selected list if it is supported."""
m = self.PATH_REGEX.match(path)
if m is None:
return
object_type = m.group('object_type')
key = m.group('key')
value = m.group('value')
if key is None:
key = 'id'
if object_type in self.SUPPORTED_OBJECT_TYPES:
# 'run' is an alternative for 'acquisition'
object_type = object_type.replace('run', 'acquisition')
self._initially_select.append(
'%s.%s-%s' % (object_type, key, value)
)
def _load_tag(self, attributes):
"""
Loads a Tag based on a certain set of attributes from the server.
@param attributes Set of attributes to filter on.
@type attributes L{dict}
"""
# Tags have an "Annotation" suffix added to the object name so
# need to be loaded differently.
return next(self.conn.getObjects(
"TagAnnotation", attributes=attributes
))
def get_well_row_column(self, well):
"""
Retrieves a tuple of row and column as L{int} for a given Well name
("A1" or "1A") string.
@param well Well name string to retrieve the row and column tuple for.
@type well L{str}
"""
m = self.WELL_REGEX.match(well)
if m is None:
return None
# We are using an algorithm that expects alpha columns and digit
# rows (like a spreadsheet). is_reversed will be True if those
# conditions are not met, signifying that the row and column
# calculated needs to be reversed before returning.
is_reversed = False
if m.group('alpha_row') is not None:
a = m.group('alpha_row').upper()
b = m.group('digit_column')
is_reversed = True
else:
a = m.group('alpha_column').upper()
b = m.group('digit_row')
# Convert base26 column string to number. Adapted from XlsxWriter:
# * https://github.com/jmcnamara/XlsxWriter
# * xlsxwriter/utility.py
n = 0
column = 0
for character in reversed(a):
column += (ord(character) - ord('A') + 1) * (26 ** n)
n += 1
# Convert 1-index to zero-index
row = int(b) - 1
column -= 1
if is_reversed:
return column, row
return row, column
def _load_well(self, attributes):
"""
Loads a Well based on a certain set of attributes from the server.
@param attributes Set of attributes to filter on.
@type attributes L{dict}
"""
if 'id' in attributes:
return self.conn.getObject('Well', attributes=attributes)
if 'name' in attributes:
row, column = self.get_well_row_column(attributes['name'])
path = self.request.REQUEST.get('path', '')
for m in self.PATH_REGEX.finditer(path):
object_type = m.group('object_type')
# May have 'run' here rather than 'acquisition' because
# the path will not have been validated and replaced.
if object_type not in ('plate', 'run', 'acquisition'):
continue
# 'run' is an alternative for 'acquisition'
object_type = object_type.replace('run', 'acquisition')
# Try and load the potential parent first
key = m.group('key')
value = m.group('value')
if key is None:
key = 'id'
if key == 'id':
value = long(value)
parent_attributes = {key: value}
parent, = self.conn.getObjects(
object_type, attributes=parent_attributes
)
# Now use the parent to try and locate the Well
query_service = self.conn.getQueryService()
params = omero.sys.ParametersI()
params.map['row'] = rint(row)
params.map['column'] = rint(column)
params.addId(parent.id)
if object_type == 'plate':
db_row, = query_service.projection(
'select w.id from Well as w '
'where w.row = :row and w.column = :column '
'and w.plate.id = :id', params, self.conn.SERVICE_OPTS
)
if object_type == 'acquisition':
db_row, = query_service.projection(
'select distinct w.id from Well as w '
'join w.wellSamples as ws '
'where w.row = :row and w.column = :column '
'and ws.plateAcquisition.id = :id',
params, self.conn.SERVICE_OPTS
)
well_id, = db_row
return self.conn.getObject(
'Well', well_id.val
)
def _load_first_selected(self, first_obj, attributes):
"""
Loads the first selected object from the server. Will raise
L{IncorrectMenuError} if the initialized menu was incorrect for
the loaded objects.
@param first_obj Type of the first selected object.
@type first_obj String
@param attributes Set of attributes to filter on.
@type attributes L{dict}
"""
first_selected = None
if first_obj == "tag":
first_selected = self._load_tag(attributes)
elif first_obj == "well":
first_selected = self._load_well(attributes)
else:
# All other objects can be loaded by type and attributes.
first_selected, = self.conn.getObjects(
first_obj, attributes=attributes
)
if first_obj == "well":
# Wells aren't in the tree, so we need to look up the parent
well_sample = first_selected.getWellSample()
parent_node = None
parent_type = None
# It's possible that the Well that we've been requested to show
# has no fields (WellSample instances). In that case the Plate
# will be used but we don't have much choice.
if well_sample is not None:
parent_node = well_sample.getPlateAcquisition()
parent_type = "acquisition"
if parent_node is None:
# No WellSample for this well, try and retrieve the
# PlateAcquisition from the parent Plate.
plate = first_selected.getParent()
try:
parent_node, = plate.listPlateAcquisitions()
parent_type = "acquisition"
except ValueError:
# No PlateAcquisition for this well, use Plate instead
parent_node = plate
parent_type = "plate"
# Tree hierarchy open to first selected "real" object available
# in the tree.
self._initially_open = [
"%s-%s" % (parent_type, parent_node.getId()),
"%s-%s" % (first_obj, first_selected.getId())
]
first_selected = parent_node
self._initially_select = self._initially_open[:]
else:
# Tree hierarchy open to first selected object.
self._initially_open = [
'%s-%s' % (first_obj, first_selected.getId())
]
# support for multiple objects selected by ID,
# E.g. show=image-1|image-2
if 'id' in attributes.keys() and len(self._initially_select) > 1:
# 'image.id-1' -> 'image-1'
self._initially_select = [
i.replace(".id", "") for i in self._initially_select]
else:
# Only select a single object
self._initially_select = self._initially_open[:]
self._initially_open_owner = first_selected.details.owner.id.val
return first_selected
def _find_first_selected(self):
"""Finds the first selected object."""
if len(self._initially_select) == 0:
return None
# tree hierarchy open to first selected object
m = self.PATH_REGEX.match(self._initially_select[0])
if m is None:
return None
first_obj = m.group('object_type')
# if we're showing a tag, make sure we're on the tags page...
if first_obj == "tag" and self.menu != "usertags":
# redirect to usertags/?show=tag-123
raise IncorrectMenuError(
reverse(viewname="load_template", args=['usertags']) +
"?show=" + self._initially_select[0].replace(".id", "")
)
first_selected = None
try:
key = m.group('key')
value = m.group('value')
if key == 'id':
value = long(value)
attributes = {key: value}
# Set context to 'cross-group'
self.conn.SERVICE_OPTS.setOmeroGroup('-1')
first_selected = self._load_first_selected(first_obj, attributes)
except:
pass
if first_obj not in self.TOP_LEVEL_PREFIXES:
# Need to see if first item has parents
if first_selected is not None:
for p in first_selected.getAncestry():
if first_obj == "tag":
# Parents of tags must be tags (no OMERO_CLASS)
self._initially_open.insert(0, "tag-%s" % p.getId())
else:
self._initially_open.insert(
0, "%s-%s" % (p.OMERO_CLASS.lower(), p.getId())
)
self._initially_open_owner = p.details.owner.id.val
m = self.PATH_REGEX.match(self._initially_open[0])
if m.group('object_type') == 'image':
self._initially_open.insert(0, "orphaned-0")
return first_selected
@property
def first_selected(self):
"""
Retrieves the first selected object. The first time this method is
invoked on the instance the actual retrieval is performed. All other
invocations retrieve the same instance without server interaction.
Will raise L{IncorrectMenuError} if the initialized menu was
incorrect for the loaded objects.
"""
if self._first_selected is None:
self._first_selected = self._find_first_selected()
return self._first_selected
@property
def initially_select(self):
"""
Retrieves the list of "paths" ("type-id") we have been requested to
show/select in the user interface. May be different than we were
first initialised with due to certain nodes of the Screen-Plate-Well
        hierarchy not being present in the tree. Should not be invoked until
after first retrieval of the L{Show.first_selected} property.
"""
return self._initially_select
@property
def initially_open(self):
"""
Retrieves the nodes of the tree that will be initially open based on
the nodes that are initially selected. Should not be invoked until
after first retrieval of the L{Show.first_selected} property.
"""
return self._initially_open
@property
def initially_open_owner(self):
"""
Retrieves the owner of the node closest to the root of the tree from
the list of initially open nodes. Should not be invoked until
after first retrieval of the L{Show.first_selected} property.
"""
return self._initially_open_owner
def paths_to_object(conn, experimenter_id=None, project_id=None,
dataset_id=None, image_id=None, screen_id=None,
plate_id=None, acquisition_id=None, well_id=None,
group_id=None):
# Set any of the parameters present and find the lowest type to find
# If path components are specified for incompatible paths, e.g. a dataset
# id and a screen id then the following priority is enforced for the
# object to find:
# image->dataset->project->well->acquisition->plate->screen->experimenter
# Note on wells:
# Selecting a 'well' is really for selecting well_sample paths
# if a well is specified on its own, we return all the well_sample paths
    # that match
params = omero.sys.ParametersI()
service_opts = deepcopy(conn.SERVICE_OPTS)
lowest_type = None
if experimenter_id is not None:
params.add('eid', rlong(experimenter_id))
lowest_type = 'experimenter'
if screen_id is not None:
params.add('sid', rlong(screen_id))
lowest_type = 'screen'
if plate_id is not None:
params.add('plid', rlong(plate_id))
lowest_type = 'plate'
if acquisition_id is not None:
params.add('aid', rlong(acquisition_id))
lowest_type = 'acquisition'
if well_id is not None:
params.add('wid', rlong(well_id))
lowest_type = 'well'
if project_id is not None:
params.add('pid', rlong(project_id))
lowest_type = 'project'
if dataset_id is not None:
params.add('did', rlong(dataset_id))
lowest_type = 'dataset'
if image_id is not None:
params.add('iid', rlong(image_id))
lowest_type = 'image'
# If none of these parameters are set then there is nothing to find
if lowest_type is None:
return []
if group_id is not None:
service_opts.setOmeroGroup(group_id)
qs = conn.getQueryService()
# Hierarchies for this object
paths = []
# It is probably possible to write a more generic query instead
# of special casing each type, but it will be less readable and
# maintainable than these
if lowest_type == 'image':
q = '''
select coalesce(powner.id, downer.id, iowner.id),
pdlink.parent.id,
dilink.parent.id,
image.id
from Image image
left outer join image.details.owner iowner
left outer join image.datasetLinks dilink
left outer join dilink.parent.details.owner downer
left outer join dilink.parent.projectLinks pdlink
left outer join pdlink.parent.details.owner powner
where image.id = :iid
'''
where_clause = []
if dataset_id is not None:
where_clause.append('dilink.parent.id = :did')
if project_id is not None:
where_clause.append('pdlink.parent.id = :pid')
if experimenter_id is not None:
where_clause.append(
'coalesce(powner.id, downer.id, iowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
q += '''
order by coalesce(powner.id, downer.id, iowner.id),
pdlink.parent.id,
dilink.parent.id,
image.id
'''
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->project->dataset->image
if e[1] is not None:
path.append({
'type': 'project',
'id': e[1].val
})
# If it is experimenter->dataset->image or
# experimenter->project->dataset->image
if e[2] is not None:
path.append({
'type': 'dataset',
'id': e[2].val
})
# If it is orphaned->image
if e[2] is None:
path.append({
'type': 'orphaned',
'id': e[0].val
})
# Image always present
path.append({
'type': 'image',
'id': e[3].val
})
paths.append(path)
elif lowest_type == 'dataset':
q = '''
select coalesce(powner.id, downer.id),
pdlink.parent.id,
dataset.id
from Dataset dataset
left outer join dataset.details.owner downer
left outer join dataset.projectLinks pdlink
left outer join pdlink.parent.details.owner powner
where dataset.id = :did
'''
where_clause = []
if project_id is not None:
where_clause.append('pdlink.parent.id = :pid')
if experimenter_id is not None:
where_clause.append('coalesce(powner.id, downer.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->project->dataset
if e[1] is not None:
path.append({
'type': 'project',
'id': e[1].val
})
# Dataset always present
path.append({
'type': 'dataset',
'id': e[2].val
})
paths.append(path)
elif lowest_type == 'project':
q = '''
select project.details.owner.id,
project.id
from Project project
where project.id = :pid
'''
for e in qs.projection(q, params, service_opts):
path = []
# Always experimenter->project
path.append({
'type': 'experimenter',
'id': e[0].val
})
path.append({
'type': 'project',
'id': e[1].val
})
paths.append(path)
# This is basically the same as WellSample except that it is not
# restricted by a particular WellSample id
# May not have acquisition (load plate from well)
# We don't need to load the wellsample (not in tree)
elif lowest_type == 'well':
q = '''
select coalesce(sowner.id, plowner.id, aowner.id, wsowner.id),
slink.parent.id,
plate.id,
acquisition.id
from WellSample wellsample
left outer join wellsample.details.owner wsowner
left outer join wellsample.plateAcquisition acquisition
left outer join wellsample.details.owner aowner
join wellsample.well well
left outer join well.plate plate
left outer join plate.details.owner plowner
left outer join plate.screenLinks slink
left outer join slink.parent.details.owner sowner
where wellsample.well.id = :wid
'''
where_clause = []
if acquisition_id is not None:
where_clause.append('acquisition.id = :aid')
if plate_id is not None:
where_clause.append('plate.id = :plid')
if screen_id is not None:
where_clause.append('slink.parent.id = :sid')
if experimenter_id is not None:
where_clause.append(
                'coalesce(sowner.id, plowner.id, aowner.id, wsowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->screen->plate->acquisition->wellsample
if e[1] is not None:
path.append({
'type': 'screen',
'id': e[1].val
})
# Plate should always present
path.append({
'type': 'plate',
'id': e[2].val
})
# Acquisition not present if plate created via API (not imported)
if e[3] is not None:
path.append({
'type': 'acquisition',
'id': e[3].val
})
paths.append(path)
elif lowest_type == 'acquisition':
q = '''
select coalesce(sowner.id, plowner.id, aowner.id),
slink.parent.id,
plate.id,
acquisition.id
from PlateAcquisition acquisition
left outer join acquisition.details.owner aowner
left outer join acquisition.plate plate
left outer join plate.details.owner plowner
left outer join plate.screenLinks slink
left outer join slink.parent.details.owner sowner
where acquisition.id = :aid
'''
where_clause = []
if plate_id is not None:
where_clause.append('plate.id = :plid')
if screen_id is not None:
where_clause.append('slink.parent.id = :sid')
if experimenter_id is not None:
where_clause.append(
'coalesce(sowner.id, plowner.id, aowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->screen->plate->acquisition
if e[1] is not None:
path.append({
'type': 'screen',
'id': e[1].val
})
# If it is experimenter->plate->acquisition or
# experimenter->screen->plate->acquisition
if e[2] is not None:
path.append({
'type': 'plate',
'id': e[2].val
})
# Acquisition always present
path.append({
'type': 'acquisition',
'id': e[3].val
})
paths.append(path)
elif lowest_type == 'plate':
q = '''
select coalesce(sowner.id, plowner.id),
splink.parent.id,
plate.id
from Plate plate
left outer join plate.details.owner sowner
left outer join plate.screenLinks splink
left outer join splink.parent.details.owner plowner
where plate.id = :plid
'''
where_clause = []
if screen_id is not None:
where_clause.append('splink.parent.id = :sid')
if experimenter_id is not None:
where_clause.append('coalesce(sowner.id, plowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->screen->plate
if e[1] is not None:
path.append({
'type': 'screen',
'id': e[1].val
})
# Plate always present
path.append({
'type': 'plate',
'id': e[2].val
})
paths.append(path)
elif lowest_type == 'screen':
q = '''
select screen.details.owner.id,
screen.id
from Screen screen
where screen.id = :sid
'''
for e in qs.projection(q, params, service_opts):
path = []
# Always experimenter->screen
path.append({
'type': 'experimenter',
'id': e[0].val
})
path.append({
'type': 'screen',
'id': e[1].val
})
paths.append(path)
elif lowest_type == 'experimenter':
path = []
# No query required here as this is the highest level container
path.append({
'type': 'experimenter',
'id': experimenter_id
})
paths.append(path)
return paths
| gpl-2.0 | -3,020,381,144,042,495,000 | 35.204715 | 79 | 0.533532 | false |
DomNomNom/Cardo | Cardo.py | 1 | 4736 |
import sys, select # for timing out input
'''
This is the overall idea of the game's main logic:
stack = [] # last value is the top of the stack
while not gameOver:
if stack is empty:
stack = [ nextEvent() ]
# stack now has at least one element
top = stack[-1]
imminent(top) # this may modify the stack
if stack and stack[-1] == top: # nothing else wants to precede us
top.apply() # this may modify gameOver
'''
def imminent(event):
# see whether there are any cards that want to be used here
pass
def stackAppend(event):
global stack
stack.append(event)
# ====== Events ======
class Event(object):
def apply(self):
pass
class GameStarted(Event):
pass
class TurnStart(Event):
def __init__(self, player):
self.player = player
def apply(self):
global currentPlayer
currentPlayer = self.player
print 'It is now {0}s turn.'.format(self.player)
class TurnEnd(Event):
def __init__(self, player):
self.player = player
def apply(self):
global currentPlayer
currentPlayer = None
class PlayerTimeOut(Event):
def __init__(self, player):
self.player = player
def apply(self):
print str(self.player) + ' timed out.'
class PlayerWin(Event):
def __init__(self, player):
self.player = player
def apply(self):
global winner
winner = self.player
stackAppend(GameOver()) # That's right, we don't even directly set it here
class PlayerDrawCard(Event):
def __init__(self, player):
self.player = player
def apply(self):
pass # TODO: cards
class UnitNoHealth(Event):
def __init__(self, unit):
self.unit = unit
def apply(self):
stackAppend(UnitDeath(self.unit))
class UnitTakeDamadge(Event):
def __init__(self, *args):
self.unit, self.damadge = args
def apply(self):
stackAppend(UnitHealthChanged(self.unit, -self.damadge))
class UnitHealthChanged(Event):
def __init__(self, *args):
self.unit, self.change = args
def apply(self):
self.unit.health += self.change
if self.unit.health <= 0:
self.unit.onNoHealth()
class UnitDeath(Event):
def __init__(self, unit):
self.unit = unit
def apply(self):
self.unit.die()
class GameOver(Event):
def apply(self):
global gameOver
gameOver = True
print 'game over man, game over!'
# ====== Units ======
# A unit is anything that has health and dies when it's health goes to, or below zero
class Unit(object):
def __init__(self):
self.health = 0
def onNoHealth(self):
stackAppend(UnitNoHealth(self))
def die(self):
print str(self) + ' died.'
pass
class Player(Unit):
def __init__(self, name):
self.name = name
self.health = 30
def __str__(self):
return '{0}(health:{1})'.format(self.name, self.health)
def die(self):
stackAppend(GameOver())
# returns an Event within a finite time
def playerControl(player):
timeout = 10
print "You have {0} seconds to answer!".format(timeout)
# TODO: allow for multiple choices
# select allows for a timout
# stackoverflow.com/questions/1335507/
inputStreams, ignored, ignored2 = select.select([sys.stdin], [], [], timeout)
if (inputStreams):
playerInput = sys.stdin.readline()
print "echo: ", playerInput
# TODO: actually use the playerInput
else:
yield PlayerTimeOut(player)
# a infinite iterator returning Events in finite time
def makeGameEvents():
global gameOver, players
yield GameStarted()
while True:
for player in players:
yield TurnStart(player)
yield UnitTakeDamadge(player, 10)
yield PlayerDrawCard(player)
for event in playerControl(player):
yield event
yield TurnEnd(player)
# global variables
stack = []
currentPlayer = None
winner = None
gameEvents = makeGameEvents()
gameOver = False
players = [ Player('Player 1'), Player('Player 2')]
while not gameOver:
# safeguard for cards interrupting each other
if len(stack) > 9000:
stack = []
print 'the stack is too large. moving on to the next event'
if not stack:
stack = [ gameEvents.next() ]
# stack now has at least one element
top = stack[-1]
# print 'processing event: ' + str(top)
imminent(top) # this may modify the stack
if stack and stack[-1] == top: # nothing else wants to precede us
stack.pop()
top.apply() # this may modify gameOver
print str(winner) + ' wins!'
| gpl-3.0 | 6,443,879,362,117,278,000 | 22.919192 | 85 | 0.614231 | false |